diff --git a/agent/retry_join.go b/agent/retry_join.go index c5a1c41bc95a..d58be4be2a7b 100644 --- a/agent/retry_join.go +++ b/agent/retry_join.go @@ -8,6 +8,7 @@ import ( "github.com/hashicorp/consul/lib" discover "github.com/hashicorp/go-discover" + discoverk8s "github.com/hashicorp/go-discover/provider/k8s" ) func (a *Agent) retryJoinLAN() { @@ -68,7 +69,17 @@ func (r *retryJoiner) retryJoin() error { return nil } - disco, err := discover.New(discover.WithUserAgent(lib.UserAgent())) + // Copy the default providers, and then add the non-default + providers := make(map[string]discover.Provider) + for k, v := range discover.Providers { + providers[k] = v + } + providers["k8s"] = &discoverk8s.Provider{} + + disco, err := discover.New( + discover.WithUserAgent(lib.UserAgent()), + discover.WithProviders(providers), + ) if err != nil { return err } diff --git a/vendor/cloud.google.com/go/LICENSE b/vendor/cloud.google.com/go/LICENSE index d64569567334..a4c5efd822fb 100644 --- a/vendor/cloud.google.com/go/LICENSE +++ b/vendor/cloud.google.com/go/LICENSE @@ -187,7 +187,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright [yyyy] [name of copyright owner] + Copyright 2014 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. diff --git a/vendor/cloud.google.com/go/compute/metadata/examples_test.go b/vendor/cloud.google.com/go/compute/metadata/examples_test.go new file mode 100644 index 000000000000..6c546a63c935 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/examples_test.go @@ -0,0 +1,46 @@ +// Copyright 2018 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata_test + +import ( + "net/http" + + "cloud.google.com/go/compute/metadata" +) + +// This example demonstrates how to use your own transport when using this package. +func ExampleNewClient() { + c := metadata.NewClient(&http.Client{Transport: userAgentTransport{ + userAgent: "my-user-agent", + base: http.DefaultTransport, + }}) + p, err := c.ProjectID() + if err != nil { + // TODO: Handle error. + } + _ = p // TODO: Use p. +} + +// userAgentTransport sets the User-Agent header before calling base. +type userAgentTransport struct { + userAgent string + base http.RoundTripper +} + +// RoundTrip implements the http.RoundTripper interface. +func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", t.userAgent) + return t.base.RoundTrip(req) +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata.go b/vendor/cloud.google.com/go/compute/metadata/metadata.go new file mode 100644 index 000000000000..9d0660be476d --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -0,0 +1,503 @@ +// Copyright 2014 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metadata provides access to Google Compute Engine (GCE) +// metadata and API service accounts. +// +// This package is a wrapper around the GCE metadata service, +// as documented at https://developers.google.com/compute/docs/metadata. +package metadata // import "cloud.google.com/go/compute/metadata" + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net" + "net/http" + "net/url" + "os" + "runtime" + "strings" + "sync" + "time" + + "golang.org/x/net/context" + "golang.org/x/net/context/ctxhttp" +) + +const ( + // metadataIP is the documented metadata server IP address. + metadataIP = "169.254.169.254" + + // metadataHostEnv is the environment variable specifying the + // GCE metadata hostname. If empty, the default value of + // metadataIP ("169.254.169.254") is used instead. + // This is variable name is not defined by any spec, as far as + // I know; it was made up for the Go package. + metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" +) + +type cachedValue struct { + k string + trim bool + mu sync.Mutex + v string +} + +var ( + projID = &cachedValue{k: "project/project-id", trim: true} + projNum = &cachedValue{k: "project/numeric-project-id", trim: true} + instID = &cachedValue{k: "instance/id", trim: true} +) + +var ( + defaultClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, + }, + }} + subscribeClient = &Client{hc: &http.Client{ + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + }, + }} +) + +// NotDefinedError is returned when requested metadata is not defined. +// +// The underlying string is the suffix after "/computeMetadata/v1/". +// +// This error is not returned if the value is defined to be the empty +// string. +type NotDefinedError string + +func (suffix NotDefinedError) Error() string { + return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix)) +} + +func (c *cachedValue) get(cl *Client) (v string, err error) { + defer c.mu.Unlock() + c.mu.Lock() + if c.v != "" { + return c.v, nil + } + if c.trim { + v, err = cl.getTrimmed(c.k) + } else { + v, err = cl.Get(c.k) + } + if err == nil { + c.v = v + } + return +} + +var ( + onGCEOnce sync.Once + onGCE bool +) + +// OnGCE reports whether this process is running on Google Compute Engine. +func OnGCE() bool { + onGCEOnce.Do(initOnGCE) + return onGCE +} + +func initOnGCE() { + onGCE = testOnGCE() +} + +func testOnGCE() bool { + // The user explicitly said they're on GCE, so trust them. + if os.Getenv(metadataHostEnv) != "" { + return true + } + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + resc := make(chan bool, 2) + + // Try two strategies in parallel. 
+ // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 + go func() { + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, defaultClient.hc, req) + if err != nil { + resc <- false + return + } + defer res.Body.Close() + resc <- res.Header.Get("Metadata-Flavor") == "Google" + }() + + go func() { + addrs, err := net.LookupHost("metadata.google.internal") + if err != nil || len(addrs) == 0 { + resc <- false + return + } + resc <- strsContains(addrs, metadataIP) + }() + + tryHarder := systemInfoSuggestsGCE() + if tryHarder { + res := <-resc + if res { + // The first strategy succeeded, so let's use it. + return true + } + // Wait for either the DNS or metadata server probe to + // contradict the other one and say we are running on + // GCE. Give it a lot of time to do so, since the system + // info already suggests we're running on a GCE BIOS. + timer := time.NewTimer(5 * time.Second) + defer timer.Stop() + select { + case res = <-resc: + return res + case <-timer.C: + // Too slow. Who knows what this system is. + return false + } + } + + // There's no hint from the system info that we're running on + // GCE, so use the first probe's result as truth, whether it's + // true or false. The goal here is to optimize for speed for + // users who are NOT running on GCE. We can't assume that + // either a DNS lookup or an HTTP request to a blackholed IP + // address is fast. Worst case this should return when the + // metaClient's Transport.ResponseHeaderTimeout or + // Transport.Dial.Timeout fires (in two seconds). + return <-resc +} + +// systemInfoSuggestsGCE reports whether the local system (without +// doing network requests) suggests that we're running on GCE. If this +// returns true, testOnGCE tries a bit harder to reach its metadata +// server. +func systemInfoSuggestsGCE() bool { + if runtime.GOOS != "linux" { + // We don't have any non-Linux clues available, at least yet. + return false + } + slurp, _ := ioutil.ReadFile("/sys/class/dmi/id/product_name") + name := strings.TrimSpace(string(slurp)) + return name == "Google" || name == "Google Compute Engine" +} + +// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no +// ResponseHeaderTimeout). +func Subscribe(suffix string, fn func(v string, ok bool) error) error { + return subscribeClient.Subscribe(suffix, fn) +} + +// Get calls Client.Get on the default client. +func Get(suffix string) (string, error) { return defaultClient.Get(suffix) } + +// ProjectID returns the current instance's project ID string. +func ProjectID() (string, error) { return defaultClient.ProjectID() } + +// NumericProjectID returns the current instance's numeric project ID. +func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() } + +// InternalIP returns the instance's primary internal IP address. +func InternalIP() (string, error) { return defaultClient.InternalIP() } + +// ExternalIP returns the instance's primary external (public) IP address. +func ExternalIP() (string, error) { return defaultClient.ExternalIP() } + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func Hostname() (string, error) { return defaultClient.Hostname() } + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. 
+func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() } + +// InstanceID returns the current VM's numeric instance ID. +func InstanceID() (string, error) { return defaultClient.InstanceID() } + +// InstanceName returns the current VM's instance ID string. +func InstanceName() (string, error) { return defaultClient.InstanceName() } + +// Zone returns the current VM's zone, such as "us-central1-b". +func Zone() (string, error) { return defaultClient.Zone() } + +// InstanceAttributes calls Client.InstanceAttributes on the default client. +func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() } + +// ProjectAttributes calls Client.ProjectAttributes on the default client. +func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() } + +// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client. +func InstanceAttributeValue(attr string) (string, error) { + return defaultClient.InstanceAttributeValue(attr) +} + +// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client. +func ProjectAttributeValue(attr string) (string, error) { + return defaultClient.ProjectAttributeValue(attr) +} + +// Scopes calls Client.Scopes on the default client. +func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) } + +func strsContains(ss []string, s string) bool { + for _, v := range ss { + if v == s { + return true + } + } + return false +} + +// A Client provides metadata. +type Client struct { + hc *http.Client +} + +// NewClient returns a Client that can be used to fetch metadata. All HTTP requests +// will use the given http.Client instead of the default client. +func NewClient(c *http.Client) *Client { + return &Client{hc: c} +} + +// getETag returns a value from the metadata service as well as the associated ETag. +// This func is otherwise equivalent to Get. +func (c *Client) getETag(suffix string) (value, etag string, err error) { + // Using a fixed IP makes it very difficult to spoof the metadata service in + // a container, which is an important use-case for local testing of cloud + // deployments. To enable spoofing of the metadata service, the environment + // variable GCE_METADATA_HOST is first inspected to decide where metadata + // requests shall go. + host := os.Getenv(metadataHostEnv) + if host == "" { + // Using 169.254.169.254 instead of "metadata" here because Go + // binaries built with the "netgo" tag and without cgo won't + // know the search suffix for "metadata" is + // ".google.internal", and this IP address is documented as + // being stable anyway. + host = metadataIP + } + url := "http://" + host + "/computeMetadata/v1/" + suffix + req, _ := http.NewRequest("GET", url, nil) + req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) + res, err := c.hc.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + if res.StatusCode == http.StatusNotFound { + return "", "", NotDefinedError(suffix) + } + if res.StatusCode != 200 { + return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url) + } + all, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + return string(all), res.Header.Get("Etag"), nil +} + +// Get returns a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". 
+// +// If the GCE_METADATA_HOST environment variable is not defined, a default of +// 169.254.169.254 will be used instead. +// +// If the requested metadata is not defined, the returned error will +// be of type NotDefinedError. +func (c *Client) Get(suffix string) (string, error) { + val, _, err := c.getETag(suffix) + return val, err +} + +func (c *Client) getTrimmed(suffix string) (s string, err error) { + s, err = c.Get(suffix) + s = strings.TrimSpace(s) + return +} + +func (c *Client) lines(suffix string) ([]string, error) { + j, err := c.Get(suffix) + if err != nil { + return nil, err + } + s := strings.Split(strings.TrimSpace(j), "\n") + for i := range s { + s[i] = strings.TrimSpace(s[i]) + } + return s, nil +} + +// ProjectID returns the current instance's project ID string. +func (c *Client) ProjectID() (string, error) { return projID.get(c) } + +// NumericProjectID returns the current instance's numeric project ID. +func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) } + +// InstanceID returns the current VM's numeric instance ID. +func (c *Client) InstanceID() (string, error) { return instID.get(c) } + +// InternalIP returns the instance's primary internal IP address. +func (c *Client) InternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/ip") +} + +// ExternalIP returns the instance's primary external (public) IP address. +func (c *Client) ExternalIP() (string, error) { + return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip") +} + +// Hostname returns the instance's hostname. This will be of the form +// ".c..internal". +func (c *Client) Hostname() (string, error) { + return c.getTrimmed("instance/hostname") +} + +// InstanceTags returns the list of user-defined instance tags, +// assigned when initially creating a GCE instance. +func (c *Client) InstanceTags() ([]string, error) { + var s []string + j, err := c.Get("instance/tags") + if err != nil { + return nil, err + } + if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil { + return nil, err + } + return s, nil +} + +// InstanceName returns the current VM's instance ID string. +func (c *Client) InstanceName() (string, error) { + host, err := c.Hostname() + if err != nil { + return "", err + } + return strings.Split(host, ".")[0], nil +} + +// Zone returns the current VM's zone, such as "us-central1-b". +func (c *Client) Zone() (string, error) { + zone, err := c.getTrimmed("instance/zone") + // zone is of the form "projects//zones/". + if err != nil { + return "", err + } + return zone[strings.LastIndex(zone, "/")+1:], nil +} + +// InstanceAttributes returns the list of user-defined attributes, +// assigned when initially creating a GCE VM instance. The value of an +// attribute can be obtained with InstanceAttributeValue. +func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") } + +// ProjectAttributes returns the list of user-defined attributes +// applying to the project as a whole, not just this VM. The value of +// an attribute can be obtained with ProjectAttributeValue. +func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") } + +// InstanceAttributeValue returns the value of the provided VM +// instance attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// InstanceAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. 
+func (c *Client) InstanceAttributeValue(attr string) (string, error) { + return c.Get("instance/attributes/" + attr) +} + +// ProjectAttributeValue returns the value of the provided +// project attribute. +// +// If the requested attribute is not defined, the returned error will +// be of type NotDefinedError. +// +// ProjectAttributeValue may return ("", nil) if the attribute was +// defined to be the empty string. +func (c *Client) ProjectAttributeValue(attr string) (string, error) { + return c.Get("project/attributes/" + attr) +} + +// Scopes returns the service account scopes for the given account. +// The account may be empty or the string "default" to use the instance's +// main account. +func (c *Client) Scopes(serviceAccount string) ([]string, error) { + if serviceAccount == "" { + serviceAccount = "default" + } + return c.lines("instance/service-accounts/" + serviceAccount + "/scopes") +} + +// Subscribe subscribes to a value from the metadata service. +// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/". +// The suffix may contain query parameters. +// +// Subscribe calls fn with the latest metadata value indicated by the provided +// suffix. If the metadata value is deleted, fn is called with the empty string +// and ok false. Subscribe blocks until fn returns a non-nil error or the value +// is deleted. Subscribe returns the error value returned from the last call to +// fn, which may be nil when ok == false. +func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error { + const failedSubscribeSleep = time.Second * 5 + + // First check to see if the metadata value exists at all. + val, lastETag, err := c.getETag(suffix) + if err != nil { + return err + } + + if err := fn(val, true); err != nil { + return err + } + + ok := true + if strings.ContainsRune(suffix, '?') { + suffix += "&wait_for_change=true&last_etag=" + } else { + suffix += "?wait_for_change=true&last_etag=" + } + for { + val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag)) + if err != nil { + if _, deleted := err.(NotDefinedError); !deleted { + time.Sleep(failedSubscribeSleep) + continue // Retry on other errors. + } + ok = false + } + lastETag = etag + + if err := fn(val, ok); err != nil || !ok { + return err + } + } +} diff --git a/vendor/cloud.google.com/go/compute/metadata/metadata_test.go b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go new file mode 100644 index 000000000000..83142b26bc86 --- /dev/null +++ b/vendor/cloud.google.com/go/compute/metadata/metadata_test.go @@ -0,0 +1,80 @@ +// Copyright 2016 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metadata + +import ( + "bytes" + "io/ioutil" + "net/http" + "os" + "sync" + "testing" +) + +func TestOnGCE_Stress(t *testing.T) { + if testing.Short() { + t.Skip("skipping in -short mode") + } + var last bool + for i := 0; i < 100; i++ { + onGCEOnce = sync.Once{} + + now := OnGCE() + if i > 0 && now != last { + t.Errorf("%d. 
changed from %v to %v", i, last, now) + } + last = now + } + t.Logf("OnGCE() = %v", last) +} + +func TestOnGCE_Force(t *testing.T) { + onGCEOnce = sync.Once{} + old := os.Getenv(metadataHostEnv) + defer os.Setenv(metadataHostEnv, old) + os.Setenv(metadataHostEnv, "127.0.0.1") + if !OnGCE() { + t.Error("OnGCE() = false; want true") + } +} + +func TestOverrideUserAgent(t *testing.T) { + const userAgent = "my-user-agent" + rt := &rrt{} + c := NewClient(&http.Client{Transport: userAgentTransport{userAgent, rt}}) + c.Get("foo") + if got, want := rt.gotUserAgent, userAgent; got != want { + t.Errorf("got %q, want %q", got, want) + } +} + +type userAgentTransport struct { + userAgent string + base http.RoundTripper +} + +func (t userAgentTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("User-Agent", t.userAgent) + return t.base.RoundTrip(req) +} + +type rrt struct { + gotUserAgent string +} + +func (r *rrt) RoundTrip(req *http.Request) (*http.Response, error) { + r.gotUserAgent = req.Header.Get("User-Agent") + return &http.Response{Body: ioutil.NopCloser(bytes.NewReader(nil))}, nil +} diff --git a/vendor/github.com/Azure/go-autorest/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/Azure/go-autorest/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..b9440428275b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +Thank you for your contribution to Go-AutoRest! We will triage and review it as soon as we can. + +As part of submitting, please make sure you can make the following assertions: + - [ ] I've tested my changes, adding unit tests if applicable. + - [ ] I've added Apache 2.0 Headers to the top of any new source files. + - [ ] I'm submitting this PR to the `dev` branch, except in the case of urgent bug fixes warranting their own release. + - [ ] If I'm targeting `master`, I've updated [CHANGELOG.md](https://github.com/Azure/go-autorest/blob/master/CHANGELOG.md) to address the changes I'm making. \ No newline at end of file diff --git a/vendor/github.com/Azure/go-autorest/.gitignore b/vendor/github.com/Azure/go-autorest/.gitignore new file mode 100644 index 000000000000..ab262cbe56ba --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.gitignore @@ -0,0 +1,31 @@ +# The standard Go .gitignore file follows. 
(Sourced from: github.com/github/gitignore/master/Go.gitignore) +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.DS_Store +.idea/ + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# go-autorest specific +vendor/ +autorest/azure/example/example diff --git a/vendor/github.com/Azure/go-autorest/.travis.yml b/vendor/github.com/Azure/go-autorest/.travis.yml new file mode 100644 index 000000000000..2664a205698f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/.travis.yml @@ -0,0 +1,35 @@ +sudo: false + +language: go + +go: + - master + - 1.11.x + - 1.10.x + - 1.9.x + - 1.8.x + +matrix: + allow_failures: + - go: master + +env: + - DEP_VERSION="0.5.0" + +before_install: + - curl -L -o $GOPATH/bin/dep https://github.com/golang/dep/releases/download/v$DEP_VERSION/dep-linux-amd64 && chmod +x $GOPATH/bin/dep + +install: + - go get -u github.com/golang/lint/golint + - go get -u github.com/stretchr/testify + - go get -u github.com/GoASTScanner/gas + - dep ensure + +script: + - grep -L -r --include *.go --exclude-dir vendor -P "Copyright (\d{4}|\(c\)) Microsoft" ./ | tee /dev/stderr | test -z "$(< /dev/stdin)" + - if [[ $TRAVIS_GO_VERSION == 1.11* ]]; then test -z "$(gofmt -s -l -w ./autorest/. | tee /dev/stderr)"; fi + - test -z "$(golint ./autorest/... | tee /dev/stderr)" + - go vet ./autorest/... + - test -z "$(gas ./autorest/... | tee /dev/stderr | grep Error)" + - go build -v ./autorest/... + - go test -v ./autorest/... diff --git a/vendor/github.com/Azure/go-autorest/CHANGELOG.md b/vendor/github.com/Azure/go-autorest/CHANGELOG.md new file mode 100644 index 000000000000..a788939c486e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/CHANGELOG.md @@ -0,0 +1,587 @@ +# CHANGELOG + +## v10.15.3 + +### Bug Fixes + +- Initialize the polling URL and method for an LRO tracker on each iteration, favoring the Azure-AsyncOperation header. + +## v10.15.2 + +### Bug Fixes + +- Use fmt.Fprint when printing request/response so that any escape sequences aren't treated as format specifiers. + +## v10.15.1 + +### Bug Fixes + +- If an LRO API returns a ```Failed``` provisioning state in the initial response return an error at that point so the caller doesn't have to poll. +- For failed LROs without an OData v4 error include the response body in the error's ```AdditionalInfo``` field to aid in diagnosing the failure. + +## v10.15.0 + +### New Features + +- Add initial support for request/response logging via setting environment variables. + Setting ```AZURE_GO_SDK_LOG_LEVEL``` to ```LogInfo``` will log request/response + without their bodies. To include the bodies set the log level to ```LogDebug```. + By default the logger writes to strerr, however it can also write to stdout or a file + if specified in ```AZURE_GO_SDK_LOG_FILE```. Note that if the specified file + already exists it will be truncated. + IMPORTANT: by default the logger will redact the Authorization and Ocp-Apim-Subscription-Key + headers. Any other secrets will *not* be redacted. + +## v10.14.0 + +### New Features + +- Added package version that contains version constants and user-agent data. + +### Bug Fixes + +- Add the user-agent to token requests. + +## v10.13.0 + +- Added support for additionalInfo in ServiceError type. 
+ +## v10.12.0 + +### New Features + +- Added field ServicePrincipalToken.MaxMSIRefreshAttempts to configure the maximun number of attempts to refresh an MSI token. + +## v10.11.4 + +### Bug Fixes + +- If an LRO returns http.StatusOK on the initial response with no async headers return the response body from Future.GetResult(). +- If there is no "final GET URL" return an error from Future.GetResult(). + +## v10.11.3 + +### Bug Fixes + +- In IMDS retry logic, if we don't receive a response don't retry. + - Renamed the retry function so it's clear it's meant for IMDS only. +- For error response bodies that aren't OData-v4 compliant stick the raw JSON in the ServiceError.Details field so the information isn't lost. + - Also add the raw HTTP response to the DetailedResponse. +- Removed superfluous wrapping of response error in azure.DoRetryWithRegistration(). + +## v10.11.2 + +### Bug Fixes + +- Validation for integers handles int and int64 types. + +## v10.11.1 + +### Bug Fixes + +- Adding User information to authorization config as parsed from CLI cache. + +## v10.11.0 + +### New Features + +- Added NewServicePrincipalTokenFromManualTokenSecret for creating a new SPT using a manual token and secret +- Added method ServicePrincipalToken.MarshalTokenJSON() to marshall the inner Token + +## v10.10.0 + +### New Features + +- Most ServicePrincipalTokens can now be marshalled/unmarshall to/from JSON (ServicePrincipalCertificateSecret and ServicePrincipalMSISecret are not supported). +- Added method ServicePrincipalToken.SetRefreshCallbacks(). + +## v10.9.2 + +### Bug Fixes + +- Refreshing a refresh token obtained from a web app authorization code now works. + +## v10.9.1 + +### Bug Fixes + +- The retry logic for MSI token requests now uses exponential backoff per the guidelines. +- IsTemporaryNetworkError() will return true for errors that don't implement the net.Error interface. + +## v10.9.0 + +### Deprecated Methods + +| Old Method | New Method | +|-------------:|:-----------:| +|azure.NewFuture() | azure.NewFutureFromResponse()| +|Future.WaitForCompletion() | Future.WaitForCompletionRef()| + +### New Features + +- Added azure.NewFutureFromResponse() for creating a Future from the initial response from an async operation. +- Added Future.GetResult() for making the final GET call to retrieve the result from an async operation. + +### Bug Fixes + +- Some futures failed to return their results, this should now be fixed. + +## v10.8.2 + +### Bug Fixes + +- Add nil-gaurd to token retry logic. + +## v10.8.1 + +### Bug Fixes + +- Return a TokenRefreshError if the sender fails on the initial request. +- Don't retry on non-temporary network errors. + +## v10.8.0 + +- Added NewAuthorizerFromEnvironmentWithResource() helper function. + +## v10.7.0 + +### New Features + +- Added *WithContext() methods to ADAL token refresh operations. + +## v10.6.2 + +- Fixed a bug on device authentication. + +## v10.6.1 + +- Added retries to MSI token get request. + +## v10.6.0 + +- Changed MSI token implementation. Now, the token endpoint is the IMDS endpoint. + +## v10.5.1 + +### Bug Fixes + +- `DeviceFlowConfig.Authorizer()` now prints the device code message when running `go test`. `-v` flag is required. + +## v10.5.0 + +### New Features + +- Added NewPollingRequestWithContext() for use with polling asynchronous operations. + +### Bug Fixes + +- Make retry logic use the request's context instead of the deprecated Cancel object. + +## v10.4.0 + +### New Features +- Added helper for parsing Azure Resource ID's. 
+- Added deprecation message to utils.GetEnvVarOrExit() + +## v10.3.0 + +### New Features +- Added EnvironmentFromURL method to load an Environment from a given URL. This function is particularly useful in the private and hybrid Cloud model, where one may define their own endpoints +- Added TokenAudience endpoint to Environment structure. This is useful in private and hybrid cloud models where TokenAudience endpoint can be different from ResourceManagerEndpoint + +## v10.2.0 + +### New Features + +- Added endpoints for batch management. + +## v10.1.3 + +### Bug Fixes + +- In Client.Do() invoke WithInspection() last so that it will inspect WithAuthorization(). +- Fixed authorization methods to invoke p.Prepare() first, aligning them with the other preparers. + +## v10.1.2 + +- Corrected comment for auth.NewAuthorizerFromFile() function. + +## v10.1.1 + +- Updated version number to match current release. + +## v10.1.0 + +### New Features + +- Expose the polling URL for futures. + +### Bug Fixes + +- Add validation.NewErrorWithValidationError back to prevent breaking changes (it is deprecated). + +## v10.0.0 + +### New Features + +- Added target and innererror fields to ServiceError to comply with OData v4 spec. +- The Done() method on futures will now return a ServiceError object when available (it used to return a partial value of such errors). +- Added helper methods for obtaining authorizers. +- Expose the polling URL for futures. + +### Bug Fixes + +- Switched from glide to dep for dependency management. +- Fixed unmarshaling of ServiceError for JSON bodies that don't conform to the OData spec. +- Fixed a race condition in token refresh. + +### Breaking Changes + +- The ServiceError.Details field type has been changed to match the OData v4 spec. +- Go v1.7 has been dropped from CI. +- API parameter validation failures will now return a unique error type validation.Error. +- The adal.Token type has been decomposed from adal.ServicePrincipalToken (this was necessary in order to fix the token refresh race). + +## v9.10.0 +- Fix the Service Bus suffix in Azure public env +- Add Service Bus Endpoint (AAD ResourceURI) for use in [Azure Service Bus RBAC Preview](https://docs.microsoft.com/en-us/azure/service-bus-messaging/service-bus-role-based-access-control) + +## v9.9.0 + +### New Features + +- Added EventGridKeyAuthorizer for key authorization with event grid topics. + +### Bug Fixes + +- Fixed race condition when auto-refreshing service principal tokens. + +## v9.8.1 + +### Bug Fixes + +- Added http.StatusNoContent (204) to the list of expected status codes for long-running operations. +- Updated runtime version info so it's current. + +## v9.8.0 + +### New Features + +- Added type azure.AsyncOpIncompleteError to be returned from a future's Result() method when the operation has not completed. + +## v9.7.1 + +### Bug Fixes + +- Use correct AAD and Graph endpoints for US Gov environment. + +## v9.7.0 + +### New Features + +- Added support for application/octet-stream MIME types. + +## v9.6.1 + +### Bug Fixes + +- Ensure Authorization header is added to request when polling for registration status. + +## v9.6.0 + +### New Features + +- Added support for acquiring tokens via MSI with a user assigned identity. + +## v9.5.3 + +### Bug Fixes +- Don't remove encoding of existing URL Query parameters when calling autorest.WithQueryParameters. +- Set correct Content Type when using autorest.WithFormData. + +## v9.5.2 + +### Bug Fixes + +- Check for nil *http.Response before dereferencing it. 
+ +## v9.5.1 + +### Bug Fixes + +- Don't count http.StatusTooManyRequests (429) against the retry cap. +- Use retry logic when SkipResourceProviderRegistration is set to true. + +## v9.5.0 + +### New Features + +- Added support for username + password, API key, authoriazation code and cognitive services authentication. +- Added field SkipResourceProviderRegistration to clients to provide a way to skip auto-registration of RPs. +- Added utility function AsStringSlice() to convert its parameters to a string slice. + +### Bug Fixes + +- When checking for authentication failures look at the error type not the status code as it could vary. + +## v9.4.2 + +### Bug Fixes + +- Validate parameters when creating credentials. +- Don't retry requests if the returned status is a 401 (http.StatusUnauthorized) as it will never succeed. + +## v9.4.1 + +### Bug Fixes + +- Update the AccessTokensPath() to read access tokens path through AZURE_ACCESS_TOKEN_FILE. If this + environment variable is not set, it will fall back to use default path set by Azure CLI. +- Use case-insensitive string comparison for polling states. + +## v9.4.0 + +### New Features + +- Added WaitForCompletion() to Future as a default polling implementation. + +### Bug Fixes + +- Method Future.Done() shouldn't update polling status for unexpected HTTP status codes. + +## v9.3.1 + +### Bug Fixes + +- DoRetryForStatusCodes will retry if sender.Do returns a non-nil error. + +## v9.3.0 + +### New Features + +- Added PollingMethod() to Future so callers know what kind of polling mechanism is used. +- Added azure.ChangeToGet() which transforms an http.Request into a GET (to be used with LROs). + +## v9.2.0 + +### New Features + +- Added support for custom Azure Stack endpoints. +- Added type azure.Future used to track the status of long-running operations. + +### Bug Fixes + +- Preserve the original error in DoRetryWithRegistration when registration fails. + +## v9.1.1 + +- Fixes a bug regarding the cookie jar on `autorest.Client.Sender`. + +## v9.1.0 + +### New Features + +- In cases where there is a non-empty error from the service, attempt to unmarshal it instead of uniformly calling it an "Unknown" error. +- Support for loading Azure CLI Authentication files. +- Automatically register your subscription with the Azure Resource Provider if it hadn't been previously. + +### Bug Fixes + + - RetriableRequest can now tolerate a ReadSeekable body being read but not reset. + - Adding missing Apache Headers + +## v9.0.0 + +> **IMPORTANT:** This release was intially labeled incorrectly as `v8.4.0`. From the time it was released, it should have been marked `v9.0.0` because it contains breaking changes to the MSI packages. We appologize for any inconvenience this causes. + +Adding MSI Endpoint Support and CLI token rehydration. + +## v8.3.1 + +Pick up bug fix in adal for MSI support. + +## v8.3.0 + +Updates to Error string formats for clarity. Also, adding a copy of the http.Response to errors for an improved debugging experience. + +## v8.2.0 + +### New Features + +- Add support for bearer authentication callbacks +- Support 429 response codes that include "Retry-After" header +- Support validation constraint "Pattern" for map keys + +### Bug Fixes + +- Make RetriableRequest work with multiple versions of Go + +## v8.1.1 +Updates the RetriableRequest to take advantage of GetBody() added in Go 1.8. + +## v8.1.0 +Adds RetriableRequest type for more efficient handling of retrying HTTP requests. + +## v8.0.0 + +ADAL refactored into its own package. 
+Support for UNIX time. + +## v7.3.1 +- Version Testing now removed from production bits that are shipped with the library. + +## v7.3.0 +- Exposing new `RespondDecorator`, `ByDiscardingBody`. This allows operations + to acknowledge that they do not need either the entire or a trailing portion + of accepts response body. In doing so, Go's http library can reuse HTTP + connections more readily. +- Adding `PrepareDecorator` to target custom BaseURLs. +- Adding ACR suffix to public cloud environment. +- Updating Glide dependencies. + +## v7.2.5 +- Fixed the Active Directory endpoint for the China cloud. +- Removes UTF-8 BOM if present in response payload. +- Added telemetry. + +## v7.2.3 +- Fixing bug in calls to `DelayForBackoff` that caused doubling of delay + duration. + +## v7.2.2 +- autorest/azure: added ASM and ARM VM DNS suffixes. + +## v7.2.1 +- fixed parsing of UTC times that are not RFC3339 conformant. + +## v7.2.0 +- autorest/validation: Reformat validation error for better error message. + +## v7.1.0 +- preparer: Added support for multipart formdata - WithMultiPartFormdata() +- preparer: Added support for sending file in request body - WithFile +- client: Added RetryDuration parameter. +- autorest/validation: new package for validation code for Azure Go SDK. + +## v7.0.7 +- Add trailing / to endpoint +- azure: add EnvironmentFromName + +## v7.0.6 +- Add retry logic for 408, 500, 502, 503 and 504 status codes. +- Change url path and query encoding logic. +- Fix DelayForBackoff for proper exponential delay. +- Add CookieJar in Client. + +## v7.0.5 +- Add check to start polling only when status is in [200,201,202]. +- Refactoring for unchecked errors. +- azure/persist changes. +- Fix 'file in use' issue in renewing token in deviceflow. +- Store header RetryAfter for subsequent requests in polling. +- Add attribute details in service error. + +## v7.0.4 +- Better error messages for long running operation failures + +## v7.0.3 +- Corrected DoPollForAsynchronous to properly handle the initial response + +## v7.0.2 +- Corrected DoPollForAsynchronous to continue using the polling method first discovered + +## v7.0.1 +- Fixed empty JSON input error in ByUnmarshallingJSON +- Fixed polling support for GET calls +- Changed format name from TimeRfc1123 to TimeRFC1123 + +## v7.0.0 +- Added ByCopying responder with supporting TeeReadCloser +- Rewrote Azure asynchronous handling +- Reverted to only unmarshalling JSON +- Corrected handling of RFC3339 time strings and added support for Rfc1123 time format + +The `json.Decoder` does not catch bad data as thoroughly as `json.Unmarshal`. Since +`encoding/json` successfully deserializes all core types, and extended types normally provide +their custom JSON serialization handlers, the code has been reverted back to using +`json.Unmarshal`. The original change to use `json.Decode` was made to reduce duplicate +code; there is no loss of function, and there is a gain in accuracy, by reverting. + +Additionally, Azure services indicate requests to be polled by multiple means. The existing code +only checked for one of those (that is, the presence of the `Azure-AsyncOperation` header). +The new code correctly covers all cases and aligns with the other Azure SDKs. + +## v6.1.0 +- Introduced `date.ByUnmarshallingJSONDate` and `date.ByUnmarshallingJSONTime` to enable JSON encoded values. 
+ +## v6.0.0 +- Completely reworked the handling of polled and asynchronous requests +- Removed unnecessary routines +- Reworked `mocks.Sender` to replay a series of `http.Response` objects +- Added `PrepareDecorators` for primitive types (e.g., bool, int32) + +Handling polled and asynchronous requests is no longer part of `Client#Send`. Instead new +`SendDecorators` implement different styles of polled behavior. See`autorest.DoPollForStatusCodes` +and `azure.DoPollForAsynchronous` for examples. + +## v5.0.0 +- Added new RespondDecorators unmarshalling primitive types +- Corrected application of inspection and authorization PrependDecorators + +## v4.0.0 +- Added support for Azure long-running operations. +- Added cancelation support to all decorators and functions that may delay. +- Breaking: `DelayForBackoff` now accepts a channel, which may be nil. + +## v3.1.0 +- Add support for OAuth Device Flow authorization. +- Add support for ServicePrincipalTokens that are backed by an existing token, rather than other secret material. +- Add helpers for persisting and restoring Tokens. +- Increased code coverage in the github.com/Azure/autorest/azure package + +## v3.0.0 +- Breaking: `NewErrorWithError` no longer takes `statusCode int`. +- Breaking: `NewErrorWithStatusCode` is replaced with `NewErrorWithResponse`. +- Breaking: `Client#Send()` no longer takes `codes ...int` argument. +- Add: XML unmarshaling support with `ByUnmarshallingXML()` +- Stopped vending dependencies locally and switched to [Glide](https://github.com/Masterminds/glide). + Applications using this library should either use Glide or vendor dependencies locally some other way. +- Add: `azure.WithErrorUnlessStatusCode()` decorator to handle Azure errors. +- Fix: use `net/http.DefaultClient` as base client. +- Fix: Missing inspection for polling responses added. +- Add: CopyAndDecode helpers. +- Improved `./autorest/to` with `[]string` helpers. +- Removed golint suppressions in .travis.yml. + +## v2.1.0 + +- Added `StatusCode` to `Error` for more easily obtaining the HTTP Reponse StatusCode (if any) + +## v2.0.0 + +- Changed `to.StringMapPtr` method signature to return a pointer +- Changed `ServicePrincipalCertificateSecret` and `NewServicePrincipalTokenFromCertificate` to support generic certificate and private keys + +## v1.0.0 + +- Added Logging inspectors to trace http.Request / Response +- Added support for User-Agent header +- Changed WithHeader PrepareDecorator to use set vs. add +- Added JSON to error when unmarshalling fails +- Added Client#Send method +- Corrected case of "Azure" in package paths +- Added "to" helpers, Azure helpers, and improved ease-of-use +- Corrected golint issues + +## v1.0.1 + +- Added CHANGELOG.md + +## v1.1.0 + +- Added mechanism to retrieve a ServicePrincipalToken using a certificate-signed JWT +- Added an example of creating a certificate-based ServicePrincipal and retrieving an OAuth token using the certificate + +## v1.1.1 + +- Introduce godeps and vendor dependencies introduced in v1.1.1 diff --git a/vendor/github.com/Azure/go-autorest/GNUmakefile b/vendor/github.com/Azure/go-autorest/GNUmakefile new file mode 100644 index 000000000000..a434e73ac49d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/GNUmakefile @@ -0,0 +1,23 @@ +DIR?=./autorest/ + +default: build + +build: fmt + go install $(DIR) + +test: + go test $(DIR) || exit 1 + +vet: + @echo "go vet ." + @go vet $(DIR)... ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. 
Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +fmt: + gofmt -w $(DIR) + +.PHONY: build test vet fmt diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.lock b/vendor/github.com/Azure/go-autorest/Gopkg.lock new file mode 100644 index 000000000000..4cf49542fcbd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.lock @@ -0,0 +1,77 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + digest = "1:0deddd908b6b4b768cfc272c16ee61e7088a60f7fe2f06c547bd3d8e1f8b8e77" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + digest = "1:6098222470fe0172157ce9bbef5d2200df4edde17ee649c5d6e48330e4afa4c6" + name = "github.com/dgrijalva/jwt-go" + packages = ["."] + pruneopts = "" + revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e" + version = "v3.2.0" + +[[projects]] + branch = "master" + digest = "1:7f175a633086a933d1940a7e7dc2154a0070a7c25fb4a2f671f3eef1a34d1fd7" + name = "github.com/dimchansky/utfbom" + packages = ["."] + pruneopts = "" + revision = "5448fe645cb1964ba70ac8f9f2ffe975e61a536c" + +[[projects]] + branch = "master" + digest = "1:096a8a9182648da3d00ff243b88407838902b6703fc12657f76890e08d1899bf" + name = "github.com/mitchellh/go-homedir" + packages = ["."] + pruneopts = "" + revision = "ae18d6b8b3205b561c79e8e5f69bff09736185f4" + +[[projects]] + digest = "1:256484dbbcd271f9ecebc6795b2df8cad4c458dd0f5fd82a8c2fa0c29f233411" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:c587772fb8ad29ad4db67575dad25ba17a51f072ff18a22b4f0257a4d9c24f75" + name = "github.com/stretchr/testify" + packages = [ + "assert", + "require", + ] + pruneopts = "" + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + branch = "master" + digest = "1:793a79198b755828dec284c6f1325e24e09186f1b7ba818b65c7c35104ed86eb" + name = "golang.org/x/crypto" + packages = [ + "pkcs12", + "pkcs12/internal/rc2", + ] + pruneopts = "" + revision = "614d502a4dac94afa3a6ce146bd1736da82514c6" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/dgrijalva/jwt-go", + "github.com/dimchansky/utfbom", + "github.com/mitchellh/go-homedir", + "github.com/stretchr/testify/require", + "golang.org/x/crypto/pkcs12", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/Azure/go-autorest/Gopkg.toml b/vendor/github.com/Azure/go-autorest/Gopkg.toml new file mode 100644 index 000000000000..917dae3223d9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/Gopkg.toml @@ -0,0 +1,41 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + + +[[constraint]] + name = "github.com/dgrijalva/jwt-go" + version = "3.1.0" + +[[constraint]] + branch = "master" + name = "github.com/dimchansky/utfbom" + +[[constraint]] + branch = "master" + name = "github.com/mitchellh/go-homedir" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.2.0" + +[[constraint]] + branch = "master" + name = "golang.org/x/crypto" diff --git a/vendor/github.com/Azure/go-autorest/README.md b/vendor/github.com/Azure/go-autorest/README.md new file mode 100644 index 000000000000..9a7b13a3592e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/README.md @@ -0,0 +1,149 @@ +# go-autorest + +[![GoDoc](https://godoc.org/github.com/Azure/go-autorest/autorest?status.png)](https://godoc.org/github.com/Azure/go-autorest/autorest) +[![Build Status](https://travis-ci.org/Azure/go-autorest.svg?branch=master)](https://travis-ci.org/Azure/go-autorest) +[![Go Report Card](https://goreportcard.com/badge/Azure/go-autorest)](https://goreportcard.com/report/Azure/go-autorest) + +Package go-autorest provides an HTTP request client for use with [Autorest](https://github.com/Azure/autorest.go)-generated API client packages. + +An authentication client tested with Azure Active Directory (AAD) is also +provided in this repo in the package +`github.com/Azure/go-autorest/autorest/adal`. Despite its name, this package +is maintained only as part of the Azure Go SDK and is not related to other +"ADAL" libraries in [github.com/AzureAD](https://github.com/AzureAD). + +## Overview + +Package go-autorest implements an HTTP request pipeline suitable for use across +multiple goroutines and provides the shared routines used by packages generated +by [Autorest](https://github.com/Azure/autorest.go). + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + +```go + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) +``` + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + +```go + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) +``` + +will set the URL to: + +``` + https://microsoft.com/a/b/c +``` + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. 
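+As a minimal sketch of this sharing pattern (the base URL and path below are placeholders, not a real service), one Preparer and one Responder can be built once and reused from several goroutines, with requests sent through the package-level Send helper:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"sync"
+	"time"
+
+	"github.com/Azure/go-autorest/autorest"
+)
+
+func main() {
+	// Built once and shared across goroutines; the decorators only close over
+	// fixed values (the base URL and path), so reuse is safe.
+	statusPreparer := autorest.CreatePreparer(
+		autorest.AsGet(),
+		autorest.WithBaseURL("https://example.com/"), // placeholder base URL
+		autorest.WithPath("status"))
+
+	okResponder := autorest.CreateResponder(
+		autorest.ByDiscardingBody(),
+		autorest.ByClosing())
+
+	var wg sync.WaitGroup
+	for i := 0; i < 3; i++ {
+		wg.Add(1)
+		go func() {
+			defer wg.Done()
+
+			// Each goroutine prepares its own request; only the decorator
+			// chains are shared.
+			req, err := statusPreparer.Prepare(&http.Request{})
+			if err != nil {
+				fmt.Println("prepare:", err)
+				return
+			}
+
+			resp, err := autorest.Send(req,
+				autorest.DoRetryForAttempts(3, time.Second))
+			if err != nil {
+				fmt.Println("send:", err)
+				return
+			}
+
+			if err := okResponder.Respond(resp); err != nil {
+				fmt.Println("respond:", err)
+			}
+		}()
+	}
+	wg.Wait()
+}
+```
+
+Because each goroutine builds its own `&http.Request{}`, only the decorator chains themselves are shared, which is what makes this reuse safe.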
+ +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., `ByUnmarshallingJson`) is likely incorrect. + +Errors raised by autorest objects and methods will conform to the `autorest.Error` interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. + +## Helpers + +### Handling Swagger Dates + +The Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure correct +parsing and formatting. + +### Handling Empty Values + +In JSON, missing values have different semantics than empty values. This is especially true for +services using the HTTP PATCH verb. The JSON submitted with a PATCH request generally contains +only those values to modify. Missing values are to be left unchanged. Developers, then, require a +means to both specify an empty value and to leave the value out of the submitted JSON. + +The Go JSON package (`encoding/json`) supports the `omitempty` tag. When specified, it omits +empty values from the rendered JSON. Since Go defines default values for all base types (such as "" +for string and 0 for int) and provides no means to mark a value as actually empty, the JSON package +treats default values as meaning empty, omitting them from the rendered JSON. This means that, using +the Go base types encoded through the default JSON package, it is not possible to create JSON to +clear a value at the server. + +The workaround within the Go community is to use pointers to base types in lieu of base types within +structures that map to JSON. For example, instead of a value of type `string`, the workaround uses +`*string`. While this enables distinguishing empty values from those to be unchanged, creating +pointers to a base type (notably constant, in-line values) requires additional variables. This, for +example, + +```go + s := struct { + S *string + }{ S: &"foo" } +``` +fails, while, this + +```go + v := "foo" + s := struct { + S *string + }{ S: &v } +``` +succeeds. + +To ease using pointers, the subpackage `to` contains helpers that convert to and from pointers for +Go base types which have Swagger analogs. It also provides a helper that converts between +`map[string]string` and `map[string]*string`, enabling the JSON to specify that the value +associated with a key should be cleared. With the helpers, the previous example becomes + +```go + s := struct { + S *string + }{ S: to.StringPtr("foo") } +``` + +## Install + +```bash +go get github.com/Azure/go-autorest/autorest +go get github.com/Azure/go-autorest/autorest/azure +go get github.com/Azure/go-autorest/autorest/date +go get github.com/Azure/go-autorest/autorest/to +``` + +## License + +See LICENSE file. + +----- + +This project has adopted the [Microsoft Open Source Code of +Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information +see the [Code of Conduct +FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact +[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional +questions or comments. diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/README.md b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md new file mode 100644 index 000000000000..7b0c4bc4d21a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/README.md @@ -0,0 +1,292 @@ +# Azure Active Directory authentication for Go + +This is a standalone package for authenticating with Azure Active +Directory from other Go libraries and applications, in particular the [Azure SDK +for Go](https://github.com/Azure/azure-sdk-for-go). + +Note: Despite the package's name it is not related to other "ADAL" libraries +maintained in the [github.com/AzureAD](https://github.com/AzureAD) org. Issues +should be opened in [this repo's](https://github.com/Azure/go-autorest/issues) +or [the SDK's](https://github.com/Azure/azure-sdk-for-go/issues) issue +trackers. + +## Install + +```bash +go get -u github.com/Azure/go-autorest/autorest/adal +``` + +## Usage + +An Active Directory application is required in order to use this library. An application can be registered in the [Azure Portal](https://portal.azure.com/) by following these [guidelines](https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-integrating-applications) or using the [Azure CLI](https://github.com/Azure/azure-cli). + +### Register an Azure AD Application with secret + + +1. Register a new application with a `secret` credential + + ``` + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --password secret + ``` + +2. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "Application ID" + ``` + + * Replace `Application ID` with `appId` from step 1. + +### Register an Azure AD Application with certificate + +1. Create a private key + + ``` + openssl genrsa -out "example-app.key" 2048 + ``` + +2. Create the certificate + + ``` + openssl req -new -key "example-app.key" -subj "/CN=example-app" -out "example-app.csr" + openssl x509 -req -in "example-app.csr" -signkey "example-app.key" -out "example-app.crt" -days 10000 + ``` + +3. Create the PKCS12 version of the certificate containing also the private key + + ``` + openssl pkcs12 -export -out "example-app.pfx" -inkey "example-app.key" -in "example-app.crt" -passout pass: + + ``` + +4. Register a new application with the certificate content form `example-app.crt` + + ``` + certificateContents="$(tail -n+2 "example-app.crt" | head -n-1)" + + az ad app create \ + --display-name example-app \ + --homepage https://example-app/home \ + --identifier-uris https://example-app/app \ + --key-usage Verify --end-date 2018-01-01 \ + --key-value "${certificateContents}" + ``` + +5. Create a service principal using the `Application ID` from previous step + + ``` + az ad sp create --id "APPLICATION_ID" + ``` + + * Replace `APPLICATION_ID` with `appId` from step 4. + + +### Grant the necessary permissions + +Azure relies on a Role-Based Access Control (RBAC) model to manage the access to resources at a fine-grained +level. 
There is a set of [pre-defined roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-built-in-roles) +which can be assigned to a service principal of an Azure AD application depending on your needs. + +``` +az role assignment create --assignee "SERVICE_PRINCIPAL_ID" --role "ROLE_NAME" +``` + +* Replace the `SERVICE_PRINCIPAL_ID` with the `appId` from the previous step. +* Replace the `ROLE_NAME` with a role name of your choice. + +It is also possible to define custom role definitions. + +``` +az role definition create --role-definition role-definition.json +``` + +* Check [custom roles](https://docs.microsoft.com/en-us/azure/active-directory/role-based-access-control-custom-roles) for more details regarding the content of the `role-definition.json` file. + + +### Acquire Access Token + +The common configuration used by all flows: + +```Go +const activeDirectoryEndpoint = "https://login.microsoftonline.com/" +tenantID := "TENANT_ID" +oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + +applicationID := "APPLICATION_ID" + +callback := func(token adal.Token) error { + // This is called after the token is acquired + return nil +} + +// The resource for which the token is acquired +resource := "https://management.core.windows.net/" +``` + +* Replace the `TENANT_ID` with your tenant ID. +* Replace the `APPLICATION_ID` with the value from the previous section. + +#### Client Credentials + +```Go +applicationSecret := "APPLICATION_SECRET" + +spt, err := adal.NewServicePrincipalToken( + oauthConfig, + applicationID, + applicationSecret, + resource, + callbacks...) +if err != nil { + return nil, err +} + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token() +} +``` + +* Replace the `APPLICATION_SECRET` with the `password` value from the previous section. + +#### Client Certificate + +```Go +certificatePath := "./example-app.pfx" + +certData, err := ioutil.ReadFile(certificatePath) +if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) +} + +// Get the certificate and private key from pfx file +certificate, rsaPrivateKey, err := decodePkcs12(certData, "") +if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) +} + +spt, err := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + +// Acquire a new access token +err = spt.Refresh() +if (err == nil) { + token := spt.Token() +} +``` + +* Update the certificate path to point to the example-app.pfx file which was created in the previous section. + + +#### Device Code + +```Go +oauthClient := &http.Client{} + +// Acquire the device code +deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + oauthConfig, + applicationID, + resource) +if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) +} + +// Display the authentication message +fmt.Println(*deviceCode.Message) + +// Wait here until the user is authenticated +token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) +if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) +} + +spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...)
+ +if (err == nil) { + token := spt.Token +} +``` + +#### Username password authenticate + +```Go +spt, err := adal.NewServicePrincipalTokenFromUsernamePassword( + oauthConfig, + applicationID, + username, + password, + resource, + callbacks...) + +if (err == nil) { + token := spt.Token +} +``` + +#### Authorization code authenticate + +``` Go +spt, err := adal.NewServicePrincipalTokenFromAuthorizationCode( + oauthConfig, + applicationID, + clientSecret, + authorizationCode, + redirectURI, + resource, + callbacks...) + +err = spt.Refresh() +if (err == nil) { + token := spt.Token +} +``` + +### Command Line Tool + +A command line tool is available in `cmd/adal.go` that can acquire a token for a given resource. It supports all flows mentioned above. + +``` +adal -h + +Usage of ./adal: + -applicationId string + application id + -certificatePath string + path to pk12/PFC application certificate + -mode string + authentication mode (device, secret, cert, refresh) (default "device") + -resource string + resource for which the token is requested + -secret string + application secret + -tenantId string + tenant id + -tokenCachePath string + location of oath token cache (default "/home/cgc/.adal/accessToken.json") +``` + +Example acquire a token for `https://management.core.windows.net/` using device code flow: + +``` +adal -mode device \ + -applicationId "APPLICATION_ID" \ + -tenantId "TENANT_ID" \ + -resource https://management.core.windows.net/ + +``` diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go b/vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go new file mode 100644 index 000000000000..7214dcabb338 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/cmd/adal.go @@ -0,0 +1,298 @@ +package main + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
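+
+// This program acquires an Azure AD access token for a single resource using
+// the device code, client secret, client certificate, or cached refresh token
+// flow, and saves the acquired token to the configured token cache path.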
+ +import ( + "flag" + "fmt" + "log" + "strings" + + "crypto/rsa" + "crypto/x509" + "io/ioutil" + "net/http" + "os/user" + + "github.com/Azure/go-autorest/autorest/adal" + "golang.org/x/crypto/pkcs12" +) + +const ( + deviceMode = "device" + clientSecretMode = "secret" + clientCertMode = "cert" + refreshMode = "refresh" + + activeDirectoryEndpoint = "https://login.microsoftonline.com/" +) + +type option struct { + name string + value string +} + +var ( + mode string + resource string + + tenantID string + applicationID string + + applicationSecret string + certificatePath string + + tokenCachePath string +) + +func checkMandatoryOptions(mode string, options ...option) { + for _, option := range options { + if strings.TrimSpace(option.value) == "" { + log.Fatalf("Authentication mode '%s' requires mandatory option '%s'.", mode, option.name) + } + } +} + +func defaultTokenCachePath() string { + usr, err := user.Current() + if err != nil { + log.Fatal(err) + } + defaultTokenPath := usr.HomeDir + "/.adal/accessToken.json" + return defaultTokenPath +} + +func init() { + flag.StringVar(&mode, "mode", "device", "authentication mode (device, secret, cert, refresh)") + flag.StringVar(&resource, "resource", "", "resource for which the token is requested") + flag.StringVar(&tenantID, "tenantId", "", "tenant id") + flag.StringVar(&applicationID, "applicationId", "", "application id") + flag.StringVar(&applicationSecret, "secret", "", "application secret") + flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/PFC application certificate") + flag.StringVar(&tokenCachePath, "tokenCachePath", defaultTokenCachePath(), "location of oath token cache") + + flag.Parse() + + switch mode = strings.TrimSpace(mode); mode { + case clientSecretMode: + checkMandatoryOptions(clientSecretMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + option{name: "secret", value: applicationSecret}, + ) + case clientCertMode: + checkMandatoryOptions(clientCertMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + option{name: "certificatePath", value: certificatePath}, + ) + case deviceMode: + checkMandatoryOptions(deviceMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + ) + case refreshMode: + checkMandatoryOptions(refreshMode, + option{name: "resource", value: resource}, + option{name: "tenantId", value: tenantID}, + option{name: "applicationId", value: applicationID}, + ) + default: + log.Fatalln("Authentication modes 'secret, 'cert', 'device' or 'refresh' are supported.") + } +} + +func acquireTokenClientSecretFlow(oauthConfig adal.OAuthConfig, + appliationID string, + applicationSecret string, + resource string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + spt, err := adal.NewServicePrincipalToken( + oauthConfig, + appliationID, + applicationSecret, + resource, + callbacks...) 
+ if err != nil { + return nil, err + } + + return spt, spt.Refresh() +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +func acquireTokenClientCertFlow(oauthConfig adal.OAuthConfig, + applicationID string, + applicationCertPath string, + resource string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + certData, err := ioutil.ReadFile(certificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, "") + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + applicationID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + if err != nil { + return nil, err + } + + return spt, spt.Refresh() +} + +func acquireTokenDeviceCodeFlow(oauthConfig adal.OAuthConfig, + applicationID string, + resource string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + oauthClient := &http.Client{} + deviceCode, err := adal.InitiateDeviceAuth( + oauthClient, + oauthConfig, + applicationID, + resource) + if err != nil { + return nil, fmt.Errorf("Failed to start device auth flow: %s", err) + } + + fmt.Println(*deviceCode.Message) + + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("Failed to finish device auth flow: %s", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...) + return spt, err +} + +func refreshToken(oauthConfig adal.OAuthConfig, + applicationID string, + resource string, + tokenCachePath string, + callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + + token, err := adal.LoadToken(tokenCachePath) + if err != nil { + return nil, fmt.Errorf("failed to load token from cache: %v", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + applicationID, + resource, + *token, + callbacks...) 
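+ // The service principal token is seeded from the cached token; spt.Refresh() below exchanges it for a fresh access token.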
+ if err != nil { + return nil, err + } + return spt, spt.Refresh() +} + +func saveToken(spt adal.Token) error { + if tokenCachePath != "" { + err := adal.SaveToken(tokenCachePath, 0600, spt) + if err != nil { + return err + } + log.Printf("Acquired token was saved in '%s' file\n", tokenCachePath) + return nil + + } + return fmt.Errorf("empty path for token cache") +} + +func main() { + oauthConfig, err := adal.NewOAuthConfig(activeDirectoryEndpoint, tenantID) + if err != nil { + panic(err) + } + + callback := func(token adal.Token) error { + return saveToken(token) + } + + log.Printf("Authenticating with mode '%s'\n", mode) + switch mode { + case clientSecretMode: + _, err = acquireTokenClientSecretFlow( + *oauthConfig, + applicationID, + applicationSecret, + resource, + callback) + case clientCertMode: + _, err = acquireTokenClientCertFlow( + *oauthConfig, + applicationID, + certificatePath, + resource, + callback) + case deviceMode: + var spt *adal.ServicePrincipalToken + spt, err = acquireTokenDeviceCodeFlow( + *oauthConfig, + applicationID, + resource, + callback) + if err == nil { + err = saveToken(spt.Token()) + } + case refreshMode: + _, err = refreshToken( + *oauthConfig, + applicationID, + resource, + tokenCachePath, + callback) + } + + if err != nil { + log.Fatalf("Failed to acquire a token for resource %s. Error: %v", resource, err) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go new file mode 100644 index 000000000000..bee5e61ddb2c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config.go @@ -0,0 +1,81 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/url" +) + +const ( + activeDirectoryAPIVersion = "1.0" +) + +// OAuthConfig represents the endpoints needed +// in OAuth operations +type OAuthConfig struct { + AuthorityEndpoint url.URL `json:"authorityEndpoint"` + AuthorizeEndpoint url.URL `json:"authorizeEndpoint"` + TokenEndpoint url.URL `json:"tokenEndpoint"` + DeviceCodeEndpoint url.URL `json:"deviceCodeEndpoint"` +} + +// IsZero returns true if the OAuthConfig object is zero-initialized. 
+func (oac OAuthConfig) IsZero() bool { + return oac == OAuthConfig{} +} + +func validateStringParam(param, name string) error { + if len(param) == 0 { + return fmt.Errorf("parameter '" + name + "' cannot be empty") + } + return nil +} + +// NewOAuthConfig returns an OAuthConfig with tenant specific urls +func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) { + if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil { + return nil, err + } + // it's legal for tenantID to be empty so don't validate it + const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s" + u, err := url.Parse(activeDirectoryEndpoint) + if err != nil { + return nil, err + } + authorityURL, err := u.Parse(tenantID) + if err != nil { + return nil, err + } + authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion)) + if err != nil { + return nil, err + } + + return &OAuthConfig{ + AuthorityEndpoint: *authorityURL, + AuthorizeEndpoint: *authorizeURL, + TokenEndpoint: *tokenURL, + DeviceCodeEndpoint: *deviceCodeURL, + }, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go new file mode 100644 index 000000000000..304280f664ad --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/config_test.go @@ -0,0 +1,44 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "testing" +) + +func TestNewOAuthConfig(t *testing.T) { + const testActiveDirectoryEndpoint = "https://login.test.com" + const testTenantID = "tenant-id-test" + + config, err := NewOAuthConfig(testActiveDirectoryEndpoint, testTenantID) + if err != nil { + t.Fatalf("autorest/adal: Unexpected error while creating oauth configuration for tenant: %v.", err) + } + + expected := "https://login.test.com/tenant-id-test/oauth2/authorize?api-version=1.0" + if config.AuthorizeEndpoint.String() != expected { + t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). actual(%v).", expected, config.AuthorizeEndpoint) + } + + expected = "https://login.test.com/tenant-id-test/oauth2/token?api-version=1.0" + if config.TokenEndpoint.String() != expected { + t.Fatalf("autorest/adal: Incorrect authorize url for Tenant from Environment. expected(%s). 
actual(%v).", expected, config.TokenEndpoint) + } + + expected = "https://login.test.com/tenant-id-test/oauth2/devicecode?api-version=1.0" + if config.DeviceCodeEndpoint.String() != expected { + t.Fatalf("autorest/adal Incorrect devicecode url for Tenant from Environment. expected(%s). actual(%v).", expected, config.DeviceCodeEndpoint) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go new file mode 100644 index 000000000000..b38f4c245897 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken.go @@ -0,0 +1,242 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* + This file is largely based on rjw57/oauth2device's code, with the follow differences: + * scope -> resource, and only allow a single one + * receive "Message" in the DeviceCode struct and show it to users as the prompt + * azure-xplat-cli has the following behavior that this emulates: + - does not send client_secret during the token exchange + - sends resource again in the token exchange request +*/ + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" +) + +const ( + logPrefix = "autorest/adal/devicetoken:" +) + +var ( + // ErrDeviceGeneric represents an unknown error from the token endpoint when using device flow + ErrDeviceGeneric = fmt.Errorf("%s Error while retrieving OAuth token: Unknown Error", logPrefix) + + // ErrDeviceAccessDenied represents an access denied error from the token endpoint when using device flow + ErrDeviceAccessDenied = fmt.Errorf("%s Error while retrieving OAuth token: Access Denied", logPrefix) + + // ErrDeviceAuthorizationPending represents the server waiting on the user to complete the device flow + ErrDeviceAuthorizationPending = fmt.Errorf("%s Error while retrieving OAuth token: Authorization Pending", logPrefix) + + // ErrDeviceCodeExpired represents the server timing out and expiring the code during device flow + ErrDeviceCodeExpired = fmt.Errorf("%s Error while retrieving OAuth token: Code Expired", logPrefix) + + // ErrDeviceSlowDown represents the service telling us we're polling too often during device flow + ErrDeviceSlowDown = fmt.Errorf("%s Error while retrieving OAuth token: Slow Down", logPrefix) + + // ErrDeviceCodeEmpty represents an empty device code from the device endpoint while using device flow + ErrDeviceCodeEmpty = fmt.Errorf("%s Error while retrieving device code: Device Code Empty", logPrefix) + + // ErrOAuthTokenEmpty represents an empty OAuth token from the token endpoint when using device flow + ErrOAuthTokenEmpty = fmt.Errorf("%s Error while retrieving OAuth token: Token Empty", logPrefix) + + errCodeSendingFails = "Error occurred while sending request for Device Authorization Code" + errCodeHandlingFails = "Error occurred while handling response from the Device Endpoint" + errTokenSendingFails = "Error occurred while sending request 
with device code for a token" + errTokenHandlingFails = "Error occurred while handling response from the Token Endpoint (during device flow)" + errStatusNotOK = "Error HTTP status != 200" +) + +// DeviceCode is the object returned by the device auth endpoint +// It contains information to instruct the user to complete the auth flow +type DeviceCode struct { + DeviceCode *string `json:"device_code,omitempty"` + UserCode *string `json:"user_code,omitempty"` + VerificationURL *string `json:"verification_url,omitempty"` + ExpiresIn *int64 `json:"expires_in,string,omitempty"` + Interval *int64 `json:"interval,string,omitempty"` + + Message *string `json:"message"` // Azure specific + Resource string // store the following, stored when initiating, used when exchanging + OAuthConfig OAuthConfig + ClientID string +} + +// TokenError is the object returned by the token exchange endpoint +// when something is amiss +type TokenError struct { + Error *string `json:"error,omitempty"` + ErrorCodes []int `json:"error_codes,omitempty"` + ErrorDescription *string `json:"error_description,omitempty"` + Timestamp *string `json:"timestamp,omitempty"` + TraceID *string `json:"trace_id,omitempty"` +} + +// DeviceToken is the object return by the token exchange endpoint +// It can either look like a Token or an ErrorToken, so put both here +// and check for presence of "Error" to know if we are in error state +type deviceToken struct { + Token + TokenError +} + +// InitiateDeviceAuth initiates a device auth flow. It returns a DeviceCode +// that can be used with CheckForUserCompletion or WaitForUserCompletion. +func InitiateDeviceAuth(sender Sender, oauthConfig OAuthConfig, clientID, resource string) (*DeviceCode, error) { + v := url.Values{ + "client_id": []string{clientID}, + "resource": []string{resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, oauthConfig.DeviceCodeEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, errStatusNotOK) + } + + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrDeviceCodeEmpty + } + + var code DeviceCode + err = json.Unmarshal(rb, &code) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errCodeHandlingFails, err.Error()) + } + + code.ClientID = clientID + code.Resource = resource + code.OAuthConfig = oauthConfig + + return &code, nil +} + +// CheckForUserCompletion takes a DeviceCode and checks with the Azure AD OAuth endpoint +// to see if the device flow has: been completed, timed out, or otherwise failed +func CheckForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + v := url.Values{ + "client_id": []string{code.ClientID}, + "code": []string{*code.DeviceCode}, + "grant_type": []string{OAuthGrantTypeDeviceCode}, + "resource": []string{code.Resource}, + } + + s := v.Encode() + body := ioutil.NopCloser(strings.NewReader(s)) + + req, err := http.NewRequest(http.MethodPost, 
code.OAuthConfig.TokenEndpoint.String(), body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + + req.ContentLength = int64(len(s)) + req.Header.Set(contentType, mimeTypeFormPost) + resp, err := sender.Do(req) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenSendingFails, err.Error()) + } + defer resp.Body.Close() + + rb, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if resp.StatusCode != http.StatusOK && len(strings.Trim(string(rb), " ")) == 0 { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, errStatusNotOK) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return nil, ErrOAuthTokenEmpty + } + + var token deviceToken + err = json.Unmarshal(rb, &token) + if err != nil { + return nil, fmt.Errorf("%s %s: %s", logPrefix, errTokenHandlingFails, err.Error()) + } + + if token.Error == nil { + return &token.Token, nil + } + + switch *token.Error { + case "authorization_pending": + return nil, ErrDeviceAuthorizationPending + case "slow_down": + return nil, ErrDeviceSlowDown + case "access_denied": + return nil, ErrDeviceAccessDenied + case "code_expired": + return nil, ErrDeviceCodeExpired + default: + return nil, ErrDeviceGeneric + } +} + +// WaitForUserCompletion calls CheckForUserCompletion repeatedly until a token is granted or an error state occurs. +// This prevents the user from looping and checking against 'ErrDeviceAuthorizationPending'. +func WaitForUserCompletion(sender Sender, code *DeviceCode) (*Token, error) { + intervalDuration := time.Duration(*code.Interval) * time.Second + waitDuration := intervalDuration + + for { + token, err := CheckForUserCompletion(sender, code) + + if err == nil { + return token, nil + } + + switch err { + case ErrDeviceSlowDown: + waitDuration += waitDuration + case ErrDeviceAuthorizationPending: + // noop + default: // everything else is "fatal" to us + return nil, err + } + + if waitDuration > (intervalDuration * 3) { + return nil, fmt.Errorf("%s Error waiting for user to complete device flow. Server told us to slow_down too much", logPrefix) + } + + time.Sleep(waitDuration) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go new file mode 100644 index 000000000000..6beee161fa3c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/devicetoken_test.go @@ -0,0 +1,330 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
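+
+// The tests below drive InitiateDeviceAuth, CheckForUserCompletion and
+// WaitForUserCompletion against mocked senders and canned JSON responses.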
+ +import ( + "encoding/json" + "fmt" + "net/http" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestResource = "SomeResource" + TestClientID = "SomeClientID" + TestTenantID = "SomeTenantID" + TestActiveDirectoryEndpoint = "https://login.test.com/" +) + +var ( + testOAuthConfig, _ = NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + TestOAuthConfig = *testOAuthConfig +) + +const MockDeviceCodeResponse = ` +{ + "device_code": "10000-40-1234567890", + "user_code": "ABCDEF", + "verification_url": "http://aka.ms/deviceauth", + "expires_in": "900", + "interval": "0" +} +` + +const MockDeviceTokenResponse = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +} +` + +func TestDeviceCodeIncludesResource(t *testing.T) { + sender := mocks.NewSender() + sender.AppendResponse(mocks.NewResponseWithContent(MockDeviceCodeResponse)) + + code, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err != nil { + t.Fatalf("adal: unexpected error initiating device auth") + } + + if code.Resource != TestResource { + t.Fatalf("adal: InitiateDeviceAuth failed to stash the resource in the DeviceCode struct") + } +} + +func TestDeviceCodeReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeSendingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeSendingFails, err.Error()) + } +} + +func TestDeviceCodeReturnsErrorIfBadRequest(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("doesn't matter") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfCannotDeserializeDeviceCode(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceCodeResponse, "expires_in", "\":, :gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err == nil || !strings.Contains(err.Error(), errCodeHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errCodeHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceCodeReturnsErrorIfEmptyDeviceCode(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := InitiateDeviceAuth(sender, TestOAuthConfig, TestClientID, TestResource) + if err != ErrDeviceCodeEmpty { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", ErrDeviceCodeEmpty, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func deviceCode() 
*DeviceCode { + var deviceCode DeviceCode + _ = json.Unmarshal([]byte(MockDeviceCodeResponse), &deviceCode) + deviceCode.Resource = TestResource + deviceCode.ClientID = TestClientID + return &deviceCode +} + +func TestDeviceTokenReturns(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(MockDeviceTokenResponse) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != nil { + t.Fatalf("adal: got error unexpectedly") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSendingFails(t *testing.T) { + sender := mocks.NewSender() + sender.SetError(fmt.Errorf("this is an error")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenSendingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenSendingFails, err.Error()) + } +} + +func TestDeviceTokenReturnsErrorIfServerError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusInternalServerError, "Internal Server Error")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCannotDeserializeDeviceToken(t *testing.T) { + gibberishJSON := strings.Replace(MockDeviceTokenResponse, "expires_in", ";:\"gibberish", -1) + sender := mocks.NewSender() + body := mocks.NewBody(gibberishJSON) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil || !strings.Contains(err.Error(), errTokenHandlingFails) { + t.Fatalf("adal: failed to get correct error expected(%s) actual(%s)", errTokenHandlingFails, err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func errorDeviceTokenResponse(message string) string { + return `{ "error": "` + message + `" }` +} + +func TestDeviceTokenReturnsErrorIfAuthorizationPending(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("authorization_pending")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := CheckForUserCompletion(sender, deviceCode()) + if err != ErrDeviceAuthorizationPending { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfSlowDown(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("slow_down")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := CheckForUserCompletion(sender, deviceCode()) + if err != ErrDeviceSlowDown { + t.Fatalf("!!!") + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +type deviceTokenSender struct { + errorString string + attempts int +} + +func newDeviceTokenSender(deviceErrorString string) *deviceTokenSender { + return &deviceTokenSender{errorString: deviceErrorString, attempts: 0} +} + +func (s *deviceTokenSender) Do(req *http.Request) (*http.Response, error) 
{ + var resp *http.Response + if s.attempts < 1 { + s.attempts++ + resp = mocks.NewResponseWithContent(errorDeviceTokenResponse(s.errorString)) + } else { + resp = mocks.NewResponseWithContent(MockDeviceTokenResponse) + } + return resp, nil +} + +// since the above only exercise CheckForUserCompletion, we repeat the test here, +// but with the intent of showing that WaitForUserCompletion loops properly. +func TestDeviceTokenSucceedsWithIntermediateAuthPending(t *testing.T) { + sender := newDeviceTokenSender("authorization_pending") + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +// same as above but with SlowDown now +func TestDeviceTokenSucceedsWithIntermediateSlowDown(t *testing.T) { + sender := newDeviceTokenSender("slow_down") + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != nil { + t.Fatalf("unexpected error occurred") + } +} + +func TestDeviceTokenReturnsErrorIfAccessDenied(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("access_denied")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrDeviceAccessDenied { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceAccessDenied.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfCodeExpired(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("code_expired")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrDeviceCodeExpired { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceCodeExpired.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorForUnknownError(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody(errorDeviceTokenResponse("unknown_error")) + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusBadRequest, "Bad Request")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err == nil { + t.Fatalf("failed to get error") + } + if err != ErrDeviceGeneric { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrDeviceGeneric.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} + +func TestDeviceTokenReturnsErrorIfTokenEmptyAndStatusOK(t *testing.T) { + sender := mocks.NewSender() + body := mocks.NewBody("") + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK")) + + _, err := WaitForUserCompletion(sender, deviceCode()) + if err != ErrOAuthTokenEmpty { + t.Fatalf("adal: got wrong error expected(%s) actual(%s)", ErrOAuthTokenEmpty.Error(), err.Error()) + } + + if body.IsOpen() { + t.Fatalf("response body was left open!") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go new file mode 100644 index 000000000000..9e15f2751f27 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist.go @@ -0,0 +1,73 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except 
in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" +) + +// LoadToken restores a Token object from a file located at 'path'. +func LoadToken(path string) (*Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var token Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&token); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into Token representation: %v", path, err) + } + return &token, nil +} + +// SaveToken persists an oauth token at the given location on disk. +// It moves the new file into place so it can safely be used to replace an existing file +// that maybe accessed by multiple processes. +func SaveToken(path string, mode os.FileMode, token Token) error { + dir := filepath.Dir(path) + err := os.MkdirAll(dir, os.ModePerm) + if err != nil { + return fmt.Errorf("failed to create directory (%s) to store token in: %v", dir, err) + } + + newFile, err := ioutil.TempFile(dir, "token") + if err != nil { + return fmt.Errorf("failed to create the temp file to write the token: %v", err) + } + tempPath := newFile.Name() + + if err := json.NewEncoder(newFile).Encode(token); err != nil { + return fmt.Errorf("failed to encode token to file (%s) while saving token: %v", tempPath, err) + } + if err := newFile.Close(); err != nil { + return fmt.Errorf("failed to close temp file %s: %v", tempPath, err) + } + + // Atomic replace to avoid multi-writer file corruptions + if err := os.Rename(tempPath, path); err != nil { + return fmt.Errorf("failed to move temporary token to desired output location. src=%s dst=%s: %v", tempPath, path, err) + } + if err := os.Chmod(path, mode); err != nil { + return fmt.Errorf("failed to chmod the token file %s: %v", path, err) + } + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go new file mode 100644 index 000000000000..a9c287c6dd6e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/persist_test.go @@ -0,0 +1,171 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/json" + "io/ioutil" + "os" + "path" + "reflect" + "runtime" + "strings" + "testing" +) + +const MockTokenJSON string = `{ + "access_token": "accessToken", + "refresh_token": "refreshToken", + "expires_in": "1000", + "expires_on": "2000", + "not_before": "3000", + "resource": "resource", + "token_type": "type" +}` + +var TestToken = Token{ + AccessToken: "accessToken", + RefreshToken: "refreshToken", + ExpiresIn: "1000", + ExpiresOn: "2000", + NotBefore: "3000", + Resource: "resource", + Type: "type", +} + +func writeTestTokenFile(t *testing.T, suffix string, contents string) *os.File { + f, err := ioutil.TempFile(os.TempDir(), suffix) + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer f.Close() + + _, err = f.Write([]byte(contents)) + if err != nil { + t.Fatalf("azure: unexpected error when writing temp test file: %v", err) + } + + return f +} + +func TestLoadToken(t *testing.T) { + f := writeTestTokenFile(t, "testloadtoken", MockTokenJSON) + defer os.Remove(f.Name()) + + expectedToken := TestToken + actualToken, err := LoadToken(f.Name()) + if err != nil { + t.Fatalf("azure: unexpected error loading token from file: %v", err) + } + + if *actualToken != expectedToken { + t.Fatalf("azure: failed to decode properly expected(%v) actual(%v)", expectedToken, *actualToken) + } + + // test that LoadToken closes the file properly + err = SaveToken(f.Name(), 0600, *actualToken) + if err != nil { + t.Fatalf("azure: could not save token after LoadToken: %v", err) + } +} + +func TestLoadTokenFailsBadPath(t *testing.T) { + _, err := LoadToken("/tmp/this_file_should_never_exist_really") + expectedSubstring := "failed to open file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func TestLoadTokenFailsBadJson(t *testing.T) { + gibberishJSON := strings.Replace(MockTokenJSON, "expires_on", ";:\"gibberish", -1) + f := writeTestTokenFile(t, "testloadtokenfailsbadjson", gibberishJSON) + defer os.Remove(f.Name()) + + _, err := LoadToken(f.Name()) + expectedSubstring := "failed to decode contents of file" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%s)", expectedSubstring, err.Error()) + } +} + +func token() *Token { + var token Token + json.Unmarshal([]byte(MockTokenJSON), &token) + return &token +} + +func TestSaveToken(t *testing.T) { + f, err := ioutil.TempFile("", "testloadtoken") + if err != nil { + t.Fatalf("azure: unexpected error when creating temp file: %v", err) + } + defer os.Remove(f.Name()) + f.Close() + + mode := os.ModePerm & 0642 + err = SaveToken(f.Name(), mode, *token()) + if err != nil { + t.Fatalf("azure: unexpected error saving token to file: %v", err) + } + fi, err := os.Stat(f.Name()) // open a new stat as held ones are not fresh + if err != nil { + t.Fatalf("azure: stat failed: %v", err) + } + if runtime.GOOS != "windows" { // permissions don't work on Windows + if perm := fi.Mode().Perm(); perm != mode { + t.Fatalf("azure: wrong file perm. 
got:%s; expected:%s file :%s", perm, mode, f.Name()) + } + } + + var actualToken Token + var expectedToken Token + + json.Unmarshal([]byte(MockTokenJSON), expectedToken) + + contents, err := ioutil.ReadFile(f.Name()) + if err != nil { + t.Fatal("!!") + } + json.Unmarshal(contents, actualToken) + + if !reflect.DeepEqual(actualToken, expectedToken) { + t.Fatal("azure: token was not serialized correctly") + } +} + +func TestSaveTokenFailsNoPermission(t *testing.T) { + pathWhereWeShouldntHavePermission := "/usr/thiswontwork/atall" + if runtime.GOOS == "windows" { + pathWhereWeShouldntHavePermission = path.Join(os.Getenv("windir"), "system32\\mytokendir\\mytoken") + } + err := SaveToken(pathWhereWeShouldntHavePermission, 0644, *token()) + expectedSubstring := "failed to create directory" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) + } +} + +func TestSaveTokenFailsCantCreate(t *testing.T) { + tokenPath := "/thiswontwork" + if runtime.GOOS == "windows" { + tokenPath = path.Join(os.Getenv("windir"), "system32") + } + err := SaveToken(tokenPath, 0644, *token()) + expectedSubstring := "failed to create the temp file to write the token" + if err == nil || !strings.Contains(err.Error(), expectedSubstring) { + t.Fatalf("azure: failed to get correct error expected(%s) actual(%v)", expectedSubstring, err) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go new file mode 100644 index 000000000000..0e5ad14d3962 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/sender.go @@ -0,0 +1,60 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" +) + +const ( + contentType = "Content-Type" + mimeTypeFormPost = "application/x-www-form-urlencoded" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. 
Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go new file mode 100644 index 000000000000..32aea8389127 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token.go @@ -0,0 +1,968 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math" + "net" + "net/http" + "net/url" + "strconv" + "strings" + "sync" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/version" + "github.com/dgrijalva/jwt-go" +) + +const ( + defaultRefresh = 5 * time.Minute + + // OAuthGrantTypeDeviceCode is the "grant_type" identifier used in device flow + OAuthGrantTypeDeviceCode = "device_code" + + // OAuthGrantTypeClientCredentials is the "grant_type" identifier used in credential flows + OAuthGrantTypeClientCredentials = "client_credentials" + + // OAuthGrantTypeUserPass is the "grant_type" identifier used in username and password auth flows + OAuthGrantTypeUserPass = "password" + + // OAuthGrantTypeRefreshToken is the "grant_type" identifier used in refresh token flows + OAuthGrantTypeRefreshToken = "refresh_token" + + // OAuthGrantTypeAuthorizationCode is the "grant_type" identifier used in authorization code flows + OAuthGrantTypeAuthorizationCode = "authorization_code" + + // metadataHeader is the header required by MSI extension + metadataHeader = "Metadata" + + // msiEndpoint is the well known endpoint for getting MSI authentications tokens + msiEndpoint = "http://169.254.169.254/metadata/identity/oauth2/token" + + // the default number of attempts to refresh an MSI authentication token + defaultMaxMSIRefreshAttempts = 5 +) + +// OAuthTokenProvider is an interface which should be implemented by an access token retriever +type OAuthTokenProvider interface { + OAuthToken() string +} + +// TokenRefreshError is an interface used by errors returned during token refresh. 
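+// Implementations surface the HTTP response from the failed refresh request via Response().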
+type TokenRefreshError interface { + error + Response() *http.Response +} + +// Refresher is an interface for token refresh functionality +type Refresher interface { + Refresh() error + RefreshExchange(resource string) error + EnsureFresh() error +} + +// RefresherWithContext is an interface for token refresh functionality +type RefresherWithContext interface { + RefreshWithContext(ctx context.Context) error + RefreshExchangeWithContext(ctx context.Context, resource string) error + EnsureFreshWithContext(ctx context.Context) error +} + +// TokenRefreshCallback is the type representing callbacks that will be called after +// a successful token refresh +type TokenRefreshCallback func(Token) error + +// Token encapsulates the access token used to authorize Azure requests. +type Token struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + + ExpiresIn string `json:"expires_in"` + ExpiresOn string `json:"expires_on"` + NotBefore string `json:"not_before"` + + Resource string `json:"resource"` + Type string `json:"token_type"` +} + +// IsZero returns true if the token object is zero-initialized. +func (t Token) IsZero() bool { + return t == Token{} +} + +// Expires returns the time.Time when the Token expires. +func (t Token) Expires() time.Time { + s, err := strconv.Atoi(t.ExpiresOn) + if err != nil { + s = -3600 + } + + expiration := date.NewUnixTimeFromSeconds(float64(s)) + + return time.Time(expiration).UTC() +} + +// IsExpired returns true if the Token is expired, false otherwise. +func (t Token) IsExpired() bool { + return t.WillExpireIn(0) +} + +// WillExpireIn returns true if the Token will expire after the passed time.Duration interval +// from now, false otherwise. +func (t Token) WillExpireIn(d time.Duration) bool { + return !t.Expires().After(time.Now().Add(d)) +} + +//OAuthToken return the current access token +func (t *Token) OAuthToken() string { + return t.AccessToken +} + +// ServicePrincipalSecret is an interface that allows various secret mechanism to fill the form +// that is submitted when acquiring an oAuth token. +type ServicePrincipalSecret interface { + SetAuthenticationValues(spt *ServicePrincipalToken, values *url.Values) error +} + +// ServicePrincipalNoSecret represents a secret type that contains no secret +// meaning it is not valid for fetching a fresh token. This is used by Manual +type ServicePrincipalNoSecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret +// It only returns an error for the ServicePrincipalNoSecret type +func (noSecret *ServicePrincipalNoSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return fmt.Errorf("Manually created ServicePrincipalToken does not contain secret material to retrieve a new access token") +} + +// MarshalJSON implements the json.Marshaler interface. +func (noSecret ServicePrincipalNoSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalNoSecret", + }) +} + +// ServicePrincipalTokenSecret implements ServicePrincipalSecret for client_secret type authorization. +type ServicePrincipalTokenSecret struct { + ClientSecret string `json:"value"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using the client_secret. 
+func (tokenSecret *ServicePrincipalTokenSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("client_secret", tokenSecret.ClientSecret) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (tokenSecret ServicePrincipalTokenSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalTokenSecret", + Value: tokenSecret.ClientSecret, + }) +} + +// ServicePrincipalCertificateSecret implements ServicePrincipalSecret for generic RSA cert auth with signed JWTs. +type ServicePrincipalCertificateSecret struct { + Certificate *x509.Certificate + PrivateKey *rsa.PrivateKey +} + +// SignJwt returns the JWT signed with the certificate's private key. +func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalToken) (string, error) { + hasher := sha1.New() + _, err := hasher.Write(secret.Certificate.Raw) + if err != nil { + return "", err + } + + thumbprint := base64.URLEncoding.EncodeToString(hasher.Sum(nil)) + + // The jti (JWT ID) claim provides a unique identifier for the JWT. + jti := make([]byte, 20) + _, err = rand.Read(jti) + if err != nil { + return "", err + } + + token := jwt.New(jwt.SigningMethodRS256) + token.Header["x5t"] = thumbprint + token.Claims = jwt.MapClaims{ + "aud": spt.inner.OauthConfig.TokenEndpoint.String(), + "iss": spt.inner.ClientID, + "sub": spt.inner.ClientID, + "jti": base64.URLEncoding.EncodeToString(jti), + "nbf": time.Now().Unix(), + "exp": time.Now().Add(time.Hour * 24).Unix(), + } + + signedString, err := token.SignedString(secret.PrivateKey) + return signedString, err +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +// It will populate the form submitted during oAuth Token Acquisition using a JWT signed with a certificate. +func (secret *ServicePrincipalCertificateSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + jwt, err := secret.SignJwt(spt) + if err != nil { + return err + } + + v.Set("client_assertion", jwt) + v.Set("client_assertion_type", "urn:ietf:params:oauth:client-assertion-type:jwt-bearer") + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalCertificateSecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalCertificateSecret is not supported") +} + +// ServicePrincipalMSISecret implements ServicePrincipalSecret for machines running the MSI Extension. +type ServicePrincipalMSISecret struct { +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (msiSecret *ServicePrincipalMSISecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (msiSecret ServicePrincipalMSISecret) MarshalJSON() ([]byte, error) { + return nil, errors.New("marshalling ServicePrincipalMSISecret is not supported") +} + +// ServicePrincipalUsernamePasswordSecret implements ServicePrincipalSecret for username and password auth. +type ServicePrincipalUsernamePasswordSecret struct { + Username string `json:"username"` + Password string `json:"password"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. 
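+// It will populate the form submitted during oAuth Token Acquisition using the username and password.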
+func (secret *ServicePrincipalUsernamePasswordSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("username", secret.Username) + v.Set("password", secret.Password) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalUsernamePasswordSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Username string `json:"username"` + Password string `json:"password"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalUsernamePasswordSecret", + Username: secret.Username, + Password: secret.Password, + }) +} + +// ServicePrincipalAuthorizationCodeSecret implements ServicePrincipalSecret for authorization code auth. +type ServicePrincipalAuthorizationCodeSecret struct { + ClientSecret string `json:"value"` + AuthorizationCode string `json:"authCode"` + RedirectURI string `json:"redirect"` +} + +// SetAuthenticationValues is a method of the interface ServicePrincipalSecret. +func (secret *ServicePrincipalAuthorizationCodeSecret) SetAuthenticationValues(spt *ServicePrincipalToken, v *url.Values) error { + v.Set("code", secret.AuthorizationCode) + v.Set("client_secret", secret.ClientSecret) + v.Set("redirect_uri", secret.RedirectURI) + return nil +} + +// MarshalJSON implements the json.Marshaler interface. +func (secret ServicePrincipalAuthorizationCodeSecret) MarshalJSON() ([]byte, error) { + type tokenType struct { + Type string `json:"type"` + Value string `json:"value"` + AuthCode string `json:"authCode"` + Redirect string `json:"redirect"` + } + return json.Marshal(tokenType{ + Type: "ServicePrincipalAuthorizationCodeSecret", + Value: secret.ClientSecret, + AuthCode: secret.AuthorizationCode, + Redirect: secret.RedirectURI, + }) +} + +// ServicePrincipalToken encapsulates a Token created for a Service Principal. +type ServicePrincipalToken struct { + inner servicePrincipalToken + refreshLock *sync.RWMutex + sender Sender + refreshCallbacks []TokenRefreshCallback + // MaxMSIRefreshAttempts is the maximum number of attempts to refresh an MSI token. + MaxMSIRefreshAttempts int +} + +// MarshalTokenJSON returns the marshalled inner token. +func (spt ServicePrincipalToken) MarshalTokenJSON() ([]byte, error) { + return json.Marshal(spt.inner.Token) +} + +// SetRefreshCallbacks replaces any existing refresh callbacks with the specified callbacks. +func (spt *ServicePrincipalToken) SetRefreshCallbacks(callbacks []TokenRefreshCallback) { + spt.refreshCallbacks = callbacks +} + +// MarshalJSON implements the json.Marshaler interface. +func (spt ServicePrincipalToken) MarshalJSON() ([]byte, error) { + return json.Marshal(spt.inner) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. 
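+//
+// A minimal round-trip sketch (error handling elided; "id", "secret" and
+// "resource" are placeholder values, and cfg is assumed to be a valid
+// OAuthConfig, e.g. from NewOAuthConfig):
+//
+//	spt, _ := NewServicePrincipalToken(cfg, "id", "secret", "resource")
+//	b, _ := json.Marshal(spt)
+//	var restored ServicePrincipalToken
+//	_ = json.Unmarshal(b, &restored) // the secret's concrete type is recovered from its "type" field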
+func (spt *ServicePrincipalToken) UnmarshalJSON(data []byte) error { + // need to determine the token type + raw := map[string]interface{}{} + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + secret := raw["secret"].(map[string]interface{}) + switch secret["type"] { + case "ServicePrincipalNoSecret": + spt.inner.Secret = &ServicePrincipalNoSecret{} + case "ServicePrincipalTokenSecret": + spt.inner.Secret = &ServicePrincipalTokenSecret{} + case "ServicePrincipalCertificateSecret": + return errors.New("unmarshalling ServicePrincipalCertificateSecret is not supported") + case "ServicePrincipalMSISecret": + return errors.New("unmarshalling ServicePrincipalMSISecret is not supported") + case "ServicePrincipalUsernamePasswordSecret": + spt.inner.Secret = &ServicePrincipalUsernamePasswordSecret{} + case "ServicePrincipalAuthorizationCodeSecret": + spt.inner.Secret = &ServicePrincipalAuthorizationCodeSecret{} + default: + return fmt.Errorf("unrecognized token type '%s'", secret["type"]) + } + err = json.Unmarshal(data, &spt.inner) + if err != nil { + return err + } + spt.refreshLock = &sync.RWMutex{} + spt.sender = &http.Client{} + return nil +} + +// internal type used for marshalling/unmarshalling +type servicePrincipalToken struct { + Token Token `json:"token"` + Secret ServicePrincipalSecret `json:"secret"` + OauthConfig OAuthConfig `json:"oauth"` + ClientID string `json:"clientID"` + Resource string `json:"resource"` + AutoRefresh bool `json:"autoRefresh"` + RefreshWithin time.Duration `json:"refreshWithin"` +} + +func validateOAuthConfig(oac OAuthConfig) error { + if oac.IsZero() { + return fmt.Errorf("parameter 'oauthConfig' cannot be zero-initialized") + } + return nil +} + +// NewServicePrincipalTokenWithSecret create a ServicePrincipalToken using the supplied ServicePrincipalSecret implementation. +func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, resource string, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(id, "id"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + spt := &ServicePrincipalToken{ + inner: servicePrincipalToken{ + OauthConfig: oauthConfig, + Secret: secret, + ClientID: id, + Resource: resource, + AutoRefresh: true, + RefreshWithin: defaultRefresh, + }, + refreshLock: &sync.RWMutex{}, + sender: &http.Client{}, + refreshCallbacks: callbacks, + } + return spt, nil +} + +// NewServicePrincipalTokenFromManualToken creates a ServicePrincipalToken using the supplied token +func NewServicePrincipalTokenFromManualToken(oauthConfig OAuthConfig, clientID string, resource string, token Token, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalNoSecret{}, + callbacks...) 
+ if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalTokenFromManualTokenSecret creates a ServicePrincipalToken using the supplied token and secret +func NewServicePrincipalTokenFromManualTokenSecret(oauthConfig OAuthConfig, clientID string, resource string, token Token, secret ServicePrincipalSecret, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if secret == nil { + return nil, fmt.Errorf("parameter 'secret' cannot be nil") + } + if token.IsZero() { + return nil, fmt.Errorf("parameter 'token' cannot be zero-initialized") + } + spt, err := NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + secret, + callbacks...) + if err != nil { + return nil, err + } + + spt.inner.Token = token + + return spt, nil +} + +// NewServicePrincipalToken creates a ServicePrincipalToken from the supplied Service Principal +// credentials scoped to the named resource. +func NewServicePrincipalToken(oauthConfig OAuthConfig, clientID string, secret string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(secret, "secret"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalTokenSecret{ + ClientSecret: secret, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromCertificate creates a ServicePrincipalToken from the supplied pkcs12 bytes. +func NewServicePrincipalTokenFromCertificate(oauthConfig OAuthConfig, clientID string, certificate *x509.Certificate, privateKey *rsa.PrivateKey, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + if certificate == nil { + return nil, fmt.Errorf("parameter 'certificate' cannot be nil") + } + if privateKey == nil { + return nil, fmt.Errorf("parameter 'privateKey' cannot be nil") + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalCertificateSecret{ + PrivateKey: privateKey, + Certificate: certificate, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromUsernamePassword creates a ServicePrincipalToken from the username and password. 
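+//
+// Illustrative usage only (credentials and resource are placeholders; cfg is
+// assumed to be a valid OAuthConfig, e.g. from NewOAuthConfig):
+//
+//	spt, err := NewServicePrincipalTokenFromUsernamePassword(cfg, "clientID", "user@example.com", "password", "https://management.azure.com/")
+//	if err == nil {
+//		err = spt.Refresh()
+//	}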
+func NewServicePrincipalTokenFromUsernamePassword(oauthConfig OAuthConfig, clientID string, username string, password string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(username, "username"); err != nil { + return nil, err + } + if err := validateStringParam(password, "password"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalUsernamePasswordSecret{ + Username: username, + Password: password, + }, + callbacks..., + ) +} + +// NewServicePrincipalTokenFromAuthorizationCode creates a ServicePrincipalToken from the +func NewServicePrincipalTokenFromAuthorizationCode(oauthConfig OAuthConfig, clientID string, clientSecret string, authorizationCode string, redirectURI string, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + + if err := validateOAuthConfig(oauthConfig); err != nil { + return nil, err + } + if err := validateStringParam(clientID, "clientID"); err != nil { + return nil, err + } + if err := validateStringParam(clientSecret, "clientSecret"); err != nil { + return nil, err + } + if err := validateStringParam(authorizationCode, "authorizationCode"); err != nil { + return nil, err + } + if err := validateStringParam(redirectURI, "redirectURI"); err != nil { + return nil, err + } + if err := validateStringParam(resource, "resource"); err != nil { + return nil, err + } + + return NewServicePrincipalTokenWithSecret( + oauthConfig, + clientID, + resource, + &ServicePrincipalAuthorizationCodeSecret{ + ClientSecret: clientSecret, + AuthorizationCode: authorizationCode, + RedirectURI: redirectURI, + }, + callbacks..., + ) +} + +// GetMSIVMEndpoint gets the MSI endpoint on Virtual Machines. +func GetMSIVMEndpoint() (string, error) { + return msiEndpoint, nil +} + +// NewServicePrincipalTokenFromMSI creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the system assigned identity when creating the token. +func NewServicePrincipalTokenFromMSI(msiEndpoint, resource string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, nil, callbacks...) +} + +// NewServicePrincipalTokenFromMSIWithUserAssignedID creates a ServicePrincipalToken via the MSI VM Extension. +// It will use the specified user assigned identity when creating the token. +func NewServicePrincipalTokenFromMSIWithUserAssignedID(msiEndpoint, resource string, userAssignedID string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) { + return newServicePrincipalTokenFromMSI(msiEndpoint, resource, &userAssignedID, callbacks...) 
+}
+
+func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedID *string, callbacks ...TokenRefreshCallback) (*ServicePrincipalToken, error) {
+	if err := validateStringParam(msiEndpoint, "msiEndpoint"); err != nil {
+		return nil, err
+	}
+	if err := validateStringParam(resource, "resource"); err != nil {
+		return nil, err
+	}
+	if userAssignedID != nil {
+		if err := validateStringParam(*userAssignedID, "userAssignedID"); err != nil {
+			return nil, err
+		}
+	}
+	// We set the oauth config token endpoint to be MSI's endpoint
+	msiEndpointURL, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return nil, err
+	}
+
+	v := url.Values{}
+	v.Set("resource", resource)
+	v.Set("api-version", "2018-02-01")
+	if userAssignedID != nil {
+		v.Set("client_id", *userAssignedID)
+	}
+	msiEndpointURL.RawQuery = v.Encode()
+
+	spt := &ServicePrincipalToken{
+		inner: servicePrincipalToken{
+			OauthConfig: OAuthConfig{
+				TokenEndpoint: *msiEndpointURL,
+			},
+			Secret:        &ServicePrincipalMSISecret{},
+			Resource:      resource,
+			AutoRefresh:   true,
+			RefreshWithin: defaultRefresh,
+		},
+		refreshLock:           &sync.RWMutex{},
+		sender:                &http.Client{},
+		refreshCallbacks:      callbacks,
+		MaxMSIRefreshAttempts: defaultMaxMSIRefreshAttempts,
+	}
+
+	if userAssignedID != nil {
+		spt.inner.ClientID = *userAssignedID
+	}
+
+	return spt, nil
+}
+
+// internal type that implements TokenRefreshError
+type tokenRefreshError struct {
+	message string
+	resp    *http.Response
+}
+
+// Error implements the error interface which is part of the TokenRefreshError interface.
+func (tre tokenRefreshError) Error() string {
+	return tre.message
+}
+
+// Response implements the TokenRefreshError interface, it returns the raw HTTP response from the refresh operation.
+func (tre tokenRefreshError) Response() *http.Response {
+	return tre.resp
+}
+
+func newTokenRefreshError(message string, resp *http.Response) TokenRefreshError {
+	return tokenRefreshError{message: message, resp: resp}
+}
+
+// EnsureFresh will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFresh() error {
+	return spt.EnsureFreshWithContext(context.Background())
+}
+
+// EnsureFreshWithContext will refresh the token if it will expire within the refresh window (as set by
+// RefreshWithin) and the autoRefresh flag is on. This method is safe for concurrent use.
+func (spt *ServicePrincipalToken) EnsureFreshWithContext(ctx context.Context) error {
+	if spt.inner.AutoRefresh && spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+		// take the write lock then check to see if the token was already refreshed
+		spt.refreshLock.Lock()
+		defer spt.refreshLock.Unlock()
+		if spt.inner.Token.WillExpireIn(spt.inner.RefreshWithin) {
+			return spt.refreshInternal(ctx, spt.inner.Resource)
+		}
+	}
+	return nil
+}
+
+// InvokeRefreshCallbacks calls any TokenRefreshCallbacks that were added to the SPT during initialization
+func (spt *ServicePrincipalToken) InvokeRefreshCallbacks(token Token) error {
+	if spt.refreshCallbacks != nil {
+		for _, callback := range spt.refreshCallbacks {
+			err := callback(spt.inner.Token)
+			if err != nil {
+				return fmt.Errorf("adal: TokenRefreshCallback handler failed. Error = '%v'", err)
+			}
+		}
+	}
+	return nil
+}
+
+// Refresh obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
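+//
+// Typical usage is indirect, via EnsureFresh or an Authorizer; a direct sketch
+// (placeholder values; cfg is assumed to be a valid OAuthConfig, e.g. from NewOAuthConfig):
+//
+//	spt, _ := NewServicePrincipalToken(cfg, "id", "secret", "resource")
+//	if err := spt.Refresh(); err == nil {
+//		_ = spt.OAuthToken() // the newly acquired access token
+//	}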
+func (spt *ServicePrincipalToken) Refresh() error {
+	return spt.RefreshWithContext(context.Background())
+}
+
+// RefreshWithContext obtains a fresh token for the Service Principal.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshWithContext(ctx context.Context) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, spt.inner.Resource)
+}
+
+// RefreshExchange refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchange(resource string) error {
+	return spt.RefreshExchangeWithContext(context.Background(), resource)
+}
+
+// RefreshExchangeWithContext refreshes the token, but for a different resource.
+// This method is not safe for concurrent use and should be synchronized.
+func (spt *ServicePrincipalToken) RefreshExchangeWithContext(ctx context.Context, resource string) error {
+	spt.refreshLock.Lock()
+	defer spt.refreshLock.Unlock()
+	return spt.refreshInternal(ctx, resource)
+}
+
+func (spt *ServicePrincipalToken) getGrantType() string {
+	switch spt.inner.Secret.(type) {
+	case *ServicePrincipalUsernamePasswordSecret:
+		return OAuthGrantTypeUserPass
+	case *ServicePrincipalAuthorizationCodeSecret:
+		return OAuthGrantTypeAuthorizationCode
+	default:
+		return OAuthGrantTypeClientCredentials
+	}
+}
+
+func isIMDS(u url.URL) bool {
+	imds, err := url.Parse(msiEndpoint)
+	if err != nil {
+		return false
+	}
+	return u.Host == imds.Host && u.Path == imds.Path
+}
+
+func (spt *ServicePrincipalToken) refreshInternal(ctx context.Context, resource string) error {
+	req, err := http.NewRequest(http.MethodPost, spt.inner.OauthConfig.TokenEndpoint.String(), nil)
+	if err != nil {
+		return fmt.Errorf("adal: Failed to build the refresh request. Error = '%v'", err)
+	}
+	req.Header.Add("User-Agent", version.UserAgent())
+	req = req.WithContext(ctx)
+	if !isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+		v := url.Values{}
+		v.Set("client_id", spt.inner.ClientID)
+		v.Set("resource", resource)
+
+		if spt.inner.Token.RefreshToken != "" {
+			v.Set("grant_type", OAuthGrantTypeRefreshToken)
+			v.Set("refresh_token", spt.inner.Token.RefreshToken)
+			// web apps must specify client_secret when refreshing tokens
+			// see https://docs.microsoft.com/en-us/azure/active-directory/develop/active-directory-protocols-oauth-code#refreshing-the-access-tokens
+			if spt.getGrantType() == OAuthGrantTypeAuthorizationCode {
+				err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+				if err != nil {
+					return err
+				}
+			}
+		} else {
+			v.Set("grant_type", spt.getGrantType())
+			err := spt.inner.Secret.SetAuthenticationValues(spt, &v)
+			if err != nil {
+				return err
+			}
+		}
+
+		s := v.Encode()
+		body := ioutil.NopCloser(strings.NewReader(s))
+		req.ContentLength = int64(len(s))
+		req.Header.Set(contentType, mimeTypeFormPost)
+		req.Body = body
+	}
+
+	if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); ok {
+		req.Method = http.MethodGet
+		req.Header.Set(metadataHeader, "true")
+	}
+
+	var resp *http.Response
+	if isIMDS(spt.inner.OauthConfig.TokenEndpoint) {
+		resp, err = retryForIMDS(spt.sender, req, spt.MaxMSIRefreshAttempts)
+	} else {
+		resp, err = spt.sender.Do(req)
+	}
+	if err != nil {
+		return newTokenRefreshError(fmt.Sprintf("adal: Failed to execute the refresh request. 
Error = '%v'", err), nil) + } + + defer resp.Body.Close() + rb, err := ioutil.ReadAll(resp.Body) + + if resp.StatusCode != http.StatusOK { + if err != nil { + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Failed reading response body: %v", resp.StatusCode, err), resp) + } + return newTokenRefreshError(fmt.Sprintf("adal: Refresh request failed. Status Code = '%d'. Response body: %s", resp.StatusCode, string(rb)), resp) + } + + // for the following error cases don't return a TokenRefreshError. the operation succeeded + // but some transient failure happened during deserialization. by returning a generic error + // the retry logic will kick in (we don't retry on TokenRefreshError). + + if err != nil { + return fmt.Errorf("adal: Failed to read a new service principal token during refresh. Error = '%v'", err) + } + if len(strings.Trim(string(rb), " ")) == 0 { + return fmt.Errorf("adal: Empty service principal token received during refresh") + } + var token Token + err = json.Unmarshal(rb, &token) + if err != nil { + return fmt.Errorf("adal: Failed to unmarshal the service principal token during refresh. Error = '%v' JSON = '%s'", err, string(rb)) + } + + spt.inner.Token = token + + return spt.InvokeRefreshCallbacks(token) +} + +// retry logic specific to retrieving a token from the IMDS endpoint +func retryForIMDS(sender Sender, req *http.Request, maxAttempts int) (resp *http.Response, err error) { + // copied from client.go due to circular dependency + retries := []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } + // extra retry status codes specific to IMDS + retries = append(retries, + http.StatusNotFound, + http.StatusGone, + // all remaining 5xx + http.StatusNotImplemented, + http.StatusHTTPVersionNotSupported, + http.StatusVariantAlsoNegotiates, + http.StatusInsufficientStorage, + http.StatusLoopDetected, + http.StatusNotExtended, + http.StatusNetworkAuthenticationRequired) + + // see https://docs.microsoft.com/en-us/azure/active-directory/managed-service-identity/how-to-use-vm-token#retry-guidance + + const maxDelay time.Duration = 60 * time.Second + + attempt := 0 + delay := time.Duration(0) + + for attempt < maxAttempts { + resp, err = sender.Do(req) + // retry on temporary network errors, e.g. transient network failures. + // if we don't receive a response then assume we can't connect to the + // endpoint so we're likely not running on an Azure VM so don't retry. + if (err != nil && !isTemporaryNetworkError(err)) || resp == nil || resp.StatusCode == http.StatusOK || !containsInt(retries, resp.StatusCode) { + return + } + + // perform exponential backoff with a cap. + // must increment attempt before calculating delay. + attempt++ + // the base value of 2 is the "delta backoff" as specified in the guidance doc + delay += (time.Duration(math.Pow(2, float64(attempt))) * time.Second) + if delay > maxDelay { + delay = maxDelay + } + + select { + case <-time.After(delay): + // intentionally left blank + case <-req.Context().Done(): + err = req.Context().Err() + return + } + } + return +} + +// returns true if the specified error is a temporary network error or false if it's not. +// if the error doesn't implement the net.Error interface the return value is true. 
+func isTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} + +// returns true if slice ints contains the value n +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +// SetAutoRefresh enables or disables automatic refreshing of stale tokens. +func (spt *ServicePrincipalToken) SetAutoRefresh(autoRefresh bool) { + spt.inner.AutoRefresh = autoRefresh +} + +// SetRefreshWithin sets the interval within which if the token will expire, EnsureFresh will +// refresh the token. +func (spt *ServicePrincipalToken) SetRefreshWithin(d time.Duration) { + spt.inner.RefreshWithin = d + return +} + +// SetSender sets the http.Client used when obtaining the Service Principal token. An +// undecorated http.Client is used by default. +func (spt *ServicePrincipalToken) SetSender(s Sender) { spt.sender = s } + +// OAuthToken implements the OAuthTokenProvider interface. It returns the current access token. +func (spt *ServicePrincipalToken) OAuthToken() string { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token.OAuthToken() +} + +// Token returns a copy of the current token. +func (spt *ServicePrincipalToken) Token() Token { + spt.refreshLock.RLock() + defer spt.refreshLock.RUnlock() + return spt.inner.Token +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go new file mode 100644 index 000000000000..5a2ab8c02c93 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/adal/token_test.go @@ -0,0 +1,923 @@ +package adal + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "context" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "crypto/x509/pkix" + "encoding/json" + "fmt" + "io/ioutil" + "math/big" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/date" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + defaultFormData = "client_id=id&client_secret=secret&grant_type=client_credentials&resource=resource" + defaultManualFormData = "client_id=id&grant_type=refresh_token&refresh_token=refreshtoken&resource=resource" +) + +func TestTokenExpires(t *testing.T) { + tt := time.Now().Add(5 * time.Second) + tk := newTokenExpiresAt(tt) + + if tk.Expires().Equal(tt) { + t.Fatalf("adal: Token#Expires miscalculated expiration time -- received %v, expected %v", tk.Expires(), tt) + } +} + +func TestTokenIsExpired(t *testing.T) { + tk := newTokenExpiresAt(time.Now().Add(-5 * time.Second)) + + if !tk.IsExpired() { + t.Fatalf("adal: Token#IsExpired failed to mark a stale token as expired -- now %v, token expires at %v", + time.Now().UTC(), tk.Expires()) + } +} + +func TestTokenIsExpiredUninitialized(t *testing.T) { + tk := &Token{} + + if !tk.IsExpired() { + t.Fatalf("adal: An uninitialized Token failed to mark itself as expired (expiration time %v)", tk.Expires()) + } +} + +func TestTokenIsNoExpired(t *testing.T) { + tk := newTokenExpiresAt(time.Now().Add(1000 * time.Second)) + + if tk.IsExpired() { + t.Fatalf("adal: Token marked a fresh token as expired -- now %v, token expires at %v", time.Now().UTC(), tk.Expires()) + } +} + +func TestTokenWillExpireIn(t *testing.T) { + d := 5 * time.Second + tk := newTokenExpiresIn(d) + + if !tk.WillExpireIn(d) { + t.Fatal("adal: Token#WillExpireIn mismeasured expiration time") + } +} + +func TestServicePrincipalTokenSetAutoRefresh(t *testing.T) { + spt := newServicePrincipalToken() + + if !spt.inner.AutoRefresh { + t.Fatal("adal: ServicePrincipalToken did not default to automatic token refreshing") + } + + spt.SetAutoRefresh(false) + if spt.inner.AutoRefresh { + t.Fatal("adal: ServicePrincipalToken#SetAutoRefresh did not disable automatic token refreshing") + } +} + +func TestServicePrincipalTokenSetRefreshWithin(t *testing.T) { + spt := newServicePrincipalToken() + + if spt.inner.RefreshWithin != defaultRefresh { + t.Fatal("adal: ServicePrincipalToken did not correctly set the default refresh interval") + } + + spt.SetRefreshWithin(2 * defaultRefresh) + if spt.inner.RefreshWithin != 2*defaultRefresh { + t.Fatal("adal: ServicePrincipalToken#SetRefreshWithin did not set the refresh interval") + } +} + +func TestServicePrincipalTokenSetSender(t *testing.T) { + spt := newServicePrincipalToken() + + c := &http.Client{} + spt.SetSender(c) + if !reflect.DeepEqual(c, spt.sender) { + t.Fatal("adal: ServicePrincipalToken#SetSender did not set the sender") + } +} + +func TestServicePrincipalTokenRefreshUsesPOST(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Method != "POST" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "POST", r.Method) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: 
ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } + + if body.IsOpen() { + t.Fatalf("the response was not closed!") + } +} + +func TestServicePrincipalTokenFromMSIRefreshUsesGET(t *testing.T) { + resource := "https://resource" + cb := func(token Token) error { return nil } + + spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb) + if err != nil { + t.Fatalf("Failed to get MSI SPT: %v", err) + } + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Method != "GET" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set HTTP method -- expected %v, received %v", "GET", r.Method) + } + if h := r.Header.Get("Metadata"); h != "true" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Metadata header for MSI") + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err = spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } + + if body.IsOpen() { + t.Fatalf("the response was not closed!") + } +} + +func TestServicePrincipalTokenFromMSIRefreshCancel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + endpoint, _ := GetMSIVMEndpoint() + + spt, err := NewServicePrincipalTokenFromMSI(endpoint, "https://resource") + if err != nil { + t.Fatalf("Failed to get MSI SPT: %v", err) + } + + c := mocks.NewSender() + c.AppendAndRepeatResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError), 5) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + end := time.Now() + + go func() { + spt.SetSender(c) + err = spt.RefreshWithContext(ctx) + end = time.Now() + wg.Done() + }() + + cancel() + wg.Wait() + time.Sleep(5 * time.Millisecond) + + if end.Sub(start) >= time.Second { + t.Fatalf("TestServicePrincipalTokenFromMSIRefreshCancel failed to cancel") + } +} + +func TestServicePrincipalTokenRefreshSetsMimeType(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.Header.Get(http.CanonicalHeaderKey("Content-Type")) != "application/x-www-form-urlencoded" { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set Content-Type -- expected %v, received %v", + "application/x-form-urlencoded", + r.Header.Get(http.CanonicalHeaderKey("Content-Type"))) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } +} + +func TestServicePrincipalTokenRefreshSetsURL(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if r.URL.String() != TestOAuthConfig.TokenEndpoint.String() { + t.Fatalf("adal: 
ServicePrincipalToken#Refresh did not correctly set the URL -- expected %v, received %v", + TestOAuthConfig.TokenEndpoint, r.URL) + } + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } +} + +func testServicePrincipalTokenRefreshSetsBody(t *testing.T, spt *ServicePrincipalToken, f func(*testing.T, []byte)) { + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("adal: Failed to read body of Service Principal token request (%v)", err) + } + f(t, b) + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } +} + +func TestServicePrincipalTokenManualRefreshSetsBody(t *testing.T) { + sptManual := newServicePrincipalTokenManual() + testServicePrincipalTokenRefreshSetsBody(t, sptManual, func(t *testing.T, b []byte) { + if string(b) != defaultManualFormData { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + defaultManualFormData, string(b)) + } + }) +} + +func TestServicePrincipalTokenCertficateRefreshSetsBody(t *testing.T) { + sptCert := newServicePrincipalTokenCertificate(t) + testServicePrincipalTokenRefreshSetsBody(t, sptCert, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := url.ParseQuery(body) + if values["client_assertion_type"][0] != "urn:ietf:params:oauth:client-assertion-type:jwt-bearer" || + values["client_id"][0] != "id" || + values["grant_type"][0] != "client_credentials" || + values["resource"][0] != "resource" { + t.Fatalf("adal: ServicePrincipalTokenCertificate#Refresh did not correctly set the HTTP Request Body.") + } + }) +} + +func TestServicePrincipalTokenUsernamePasswordRefreshSetsBody(t *testing.T) { + spt := newServicePrincipalTokenUsernamePassword(t) + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := url.ParseQuery(body) + if values["client_id"][0] != "id" || + values["grant_type"][0] != "password" || + values["username"][0] != "username" || + values["password"][0] != "password" || + values["resource"][0] != "resource" { + t.Fatalf("adal: ServicePrincipalTokenUsernamePassword#Refresh did not correctly set the HTTP Request Body.") + } + }) +} + +func TestServicePrincipalTokenAuthorizationCodeRefreshSetsBody(t *testing.T) { + spt := newServicePrincipalTokenAuthorizationCode(t) + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := url.ParseQuery(body) + if values["client_id"][0] != "id" || + values["grant_type"][0] != OAuthGrantTypeAuthorizationCode || + values["code"][0] != "code" || + values["client_secret"][0] != "clientSecret" || + values["redirect_uri"][0] != "http://redirectUri/getToken" || + values["resource"][0] != "resource" { + t.Fatalf("adal: ServicePrincipalTokenAuthorizationCode#Refresh did not correctly set the HTTP Request Body.") + } + }) + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + body := string(b) + + values, _ := 
url.ParseQuery(body) + if values["client_id"][0] != "id" || + values["grant_type"][0] != OAuthGrantTypeRefreshToken || + values["code"][0] != "code" || + values["client_secret"][0] != "clientSecret" || + values["redirect_uri"][0] != "http://redirectUri/getToken" || + values["resource"][0] != "resource" { + t.Fatalf("adal: ServicePrincipalTokenAuthorizationCode#Refresh did not correctly set the HTTP Request Body.") + } + }) +} + +func TestServicePrincipalTokenSecretRefreshSetsBody(t *testing.T) { + spt := newServicePrincipalToken() + testServicePrincipalTokenRefreshSetsBody(t, spt, func(t *testing.T, b []byte) { + if string(b) != defaultFormData { + t.Fatalf("adal: ServicePrincipalToken#Refresh did not correctly set the HTTP Request Body -- expected %v, received %v", + defaultFormData, string(b)) + } + + }) +} + +func TestServicePrincipalTokenRefreshClosesRequestBody(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } + if resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("adal: ServicePrincipalToken#Refresh failed to close the HTTP Response Body") + } +} + +func TestServicePrincipalTokenRefreshRejectsResponsesWithStatusNotOK(t *testing.T) { + spt := newServicePrincipalToken() + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusUnauthorized, "Unauthorized") + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err == nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh should reject a response with status != %d", http.StatusOK) + } +} + +func TestServicePrincipalTokenRefreshRejectsEmptyBody(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + err := spt.Refresh() + if err == nil { + t.Fatal("adal: ServicePrincipalToken#Refresh should reject an empty token") + } +} + +func TestServicePrincipalTokenRefreshPropagatesErrors(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + c.SetError(fmt.Errorf("Faux Error")) + spt.SetSender(c) + + err := spt.Refresh() + if err == nil { + t.Fatal("adal: Failed to propagate the request error") + } +} + +func TestServicePrincipalTokenRefreshReturnsErrorIfNotOk(t *testing.T) { + spt := newServicePrincipalToken() + + c := mocks.NewSender() + c.AppendResponse(mocks.NewResponseWithStatus("401 NotAuthorized", http.StatusUnauthorized)) + spt.SetSender(c) + + err := spt.Refresh() + if err == nil { + t.Fatalf("adal: Failed to return an when receiving a status code other than HTTP %d", http.StatusOK) + } +} + +func TestServicePrincipalTokenRefreshUnmarshals(t *testing.T) { + spt := newServicePrincipalToken() + + expiresOn := 
strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds())) + j := newTokenJSON(expiresOn, "resource") + resp := mocks.NewResponseWithContent(j) + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } else if spt.inner.Token.AccessToken != "accessToken" || + spt.inner.Token.ExpiresIn != "3600" || + spt.inner.Token.ExpiresOn != expiresOn || + spt.inner.Token.NotBefore != expiresOn || + spt.inner.Token.Resource != "resource" || + spt.inner.Token.Type != "Bearer" { + t.Fatalf("adal: ServicePrincipalToken#Refresh failed correctly unmarshal the JSON -- expected %v, received %v", + j, *spt) + } +} + +func TestServicePrincipalTokenEnsureFreshRefreshes(t *testing.T) { + spt := newServicePrincipalToken() + expireToken(&spt.inner.Token) + + body := mocks.NewBody(newTokenJSON("test", "test")) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + + f := false + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return resp, nil + }) + } + })()) + spt.SetSender(s) + err := spt.EnsureFresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err) + } + if !f { + t.Fatal("adal: ServicePrincipalToken#EnsureFresh failed to call Refresh for stale token") + } +} + +func TestServicePrincipalTokenEnsureFreshFails(t *testing.T) { + spt := newServicePrincipalToken() + expireToken(&spt.inner.Token) + + c := mocks.NewSender() + c.SetError(fmt.Errorf("some failure")) + + spt.SetSender(c) + err := spt.EnsureFresh() + if err == nil { + t.Fatal("adal: ServicePrincipalToken#EnsureFresh didn't return an error") + } + if _, ok := err.(TokenRefreshError); !ok { + t.Fatal("adal: ServicePrincipalToken#EnsureFresh didn't return a TokenRefreshError") + } +} + +func TestServicePrincipalTokenEnsureFreshSkipsIfFresh(t *testing.T) { + spt := newServicePrincipalToken() + setTokenToExpireIn(&spt.inner.Token, 1000*time.Second) + + f := false + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return mocks.NewResponse(), nil + }) + } + })()) + spt.SetSender(s) + err := spt.EnsureFresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#EnsureFresh returned an unexpected error (%v)", err) + } + if f { + t.Fatal("adal: ServicePrincipalToken#EnsureFresh invoked Refresh for fresh token") + } +} + +func TestRefreshCallback(t *testing.T) { + callbackTriggered := false + spt := newServicePrincipalToken(func(Token) error { + callbackTriggered = true + return nil + }) + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds())) + + sender := mocks.NewSender() + j := newTokenJSON(expiresOn, "resource") + sender.AppendResponse(mocks.NewResponseWithContent(j)) + spt.SetSender(sender) + err := spt.Refresh() + if err != nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh returned an unexpected error (%v)", err) + } + if !callbackTriggered { + t.Fatalf("adal: RefreshCallback failed to trigger call callback") + } +} + +func 
TestRefreshCallbackErrorPropagates(t *testing.T) { + errorText := "this is an error text" + spt := newServicePrincipalToken(func(Token) error { + return fmt.Errorf(errorText) + }) + + expiresOn := strconv.Itoa(int(time.Now().Add(3600 * time.Second).Sub(date.UnixEpoch()).Seconds())) + + sender := mocks.NewSender() + j := newTokenJSON(expiresOn, "resource") + sender.AppendResponse(mocks.NewResponseWithContent(j)) + spt.SetSender(sender) + err := spt.Refresh() + + if err == nil || !strings.Contains(err.Error(), errorText) { + t.Fatalf("adal: RefreshCallback failed to propagate error") + } +} + +// This demonstrates the danger of manual token without a refresh token +func TestServicePrincipalTokenManualRefreshFailsWithoutRefresh(t *testing.T) { + spt := newServicePrincipalTokenManual() + spt.inner.Token.RefreshToken = "" + err := spt.Refresh() + if err == nil { + t.Fatalf("adal: ServicePrincipalToken#Refresh should have failed with a ManualTokenSecret without a refresh token") + } +} + +func TestNewServicePrincipalTokenFromMSI(t *testing.T) { + resource := "https://resource" + cb := func(token Token) error { return nil } + + spt, err := NewServicePrincipalTokenFromMSI("http://msiendpoint/", resource, cb) + if err != nil { + t.Fatalf("Failed to get MSI SPT: %v", err) + } + + // check some of the SPT fields + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); !ok { + t.Fatal("SPT secret was not of MSI type") + } + + if spt.inner.Resource != resource { + t.Fatal("SPT came back with incorrect resource") + } + + if len(spt.refreshCallbacks) != 1 { + t.Fatal("SPT had incorrect refresh callbacks.") + } +} + +func TestNewServicePrincipalTokenFromMSIWithUserAssignedID(t *testing.T) { + resource := "https://resource" + userID := "abc123" + cb := func(token Token) error { return nil } + + spt, err := NewServicePrincipalTokenFromMSIWithUserAssignedID("http://msiendpoint/", resource, userID, cb) + if err != nil { + t.Fatalf("Failed to get MSI SPT: %v", err) + } + + // check some of the SPT fields + if _, ok := spt.inner.Secret.(*ServicePrincipalMSISecret); !ok { + t.Fatal("SPT secret was not of MSI type") + } + + if spt.inner.Resource != resource { + t.Fatal("SPT came back with incorrect resource") + } + + if len(spt.refreshCallbacks) != 1 { + t.Fatal("SPT had incorrect refresh callbacks.") + } + + if spt.inner.ClientID != userID { + t.Fatal("SPT had incorrect client ID") + } +} + +func TestNewServicePrincipalTokenFromManualTokenSecret(t *testing.T) { + token := newToken() + secret := &ServicePrincipalAuthorizationCodeSecret{ + ClientSecret: "clientSecret", + AuthorizationCode: "code123", + RedirectURI: "redirect", + } + + spt, err := NewServicePrincipalTokenFromManualTokenSecret(TestOAuthConfig, "id", "resource", *token, secret, nil) + if err != nil { + t.Fatalf("Failed creating new SPT: %s", err) + } + + if !reflect.DeepEqual(*token, spt.inner.Token) { + t.Fatalf("Tokens do not match: %s, %s", *token, spt.inner.Token) + } + + if !reflect.DeepEqual(secret, spt.inner.Secret) { + t.Fatalf("Secrets do not match: %s, %s", secret, spt.inner.Secret) + } + +} + +func TestGetVMEndpoint(t *testing.T) { + endpoint, err := GetMSIVMEndpoint() + if err != nil { + t.Fatal("Coudn't get VM endpoint") + } + + if endpoint != msiEndpoint { + t.Fatal("Didn't get correct endpoint") + } +} + +func TestMarshalServicePrincipalNoSecret(t *testing.T) { + spt := newServicePrincipalTokenManual() + b, err := json.Marshal(spt) + if err != nil { + t.Fatalf("failed to marshal token: %+v", err) + } + var spt2 
*ServicePrincipalToken + err = json.Unmarshal(b, &spt2) + if err != nil { + t.Fatalf("failed to unmarshal token: %+v", err) + } + if !reflect.DeepEqual(spt, spt2) { + t.Fatal("tokens don't match") + } +} + +func TestMarshalServicePrincipalTokenSecret(t *testing.T) { + spt := newServicePrincipalToken() + b, err := json.Marshal(spt) + if err != nil { + t.Fatalf("failed to marshal token: %+v", err) + } + var spt2 *ServicePrincipalToken + err = json.Unmarshal(b, &spt2) + if err != nil { + t.Fatalf("failed to unmarshal token: %+v", err) + } + if !reflect.DeepEqual(spt, spt2) { + t.Fatal("tokens don't match") + } +} + +func TestMarshalServicePrincipalCertificateSecret(t *testing.T) { + spt := newServicePrincipalTokenCertificate(t) + b, err := json.Marshal(spt) + if err == nil { + t.Fatal("expected error when marshalling certificate token") + } + var spt2 *ServicePrincipalToken + err = json.Unmarshal(b, &spt2) + if err == nil { + t.Fatal("expected error when unmarshalling certificate token") + } +} + +func TestMarshalServicePrincipalMSISecret(t *testing.T) { + spt, err := newServicePrincipalTokenFromMSI("http://msiendpoint/", "https://resource", nil) + if err != nil { + t.Fatalf("failed to get MSI SPT: %+v", err) + } + b, err := json.Marshal(spt) + if err == nil { + t.Fatal("expected error when marshalling MSI token") + } + var spt2 *ServicePrincipalToken + err = json.Unmarshal(b, &spt2) + if err == nil { + t.Fatal("expected error when unmarshalling MSI token") + } +} + +func TestMarshalServicePrincipalUsernamePasswordSecret(t *testing.T) { + spt := newServicePrincipalTokenUsernamePassword(t) + b, err := json.Marshal(spt) + if err != nil { + t.Fatalf("failed to marshal token: %+v", err) + } + var spt2 *ServicePrincipalToken + err = json.Unmarshal(b, &spt2) + if err != nil { + t.Fatalf("failed to unmarshal token: %+v", err) + } + if !reflect.DeepEqual(spt, spt2) { + t.Fatal("tokens don't match") + } +} + +func TestMarshalServicePrincipalAuthorizationCodeSecret(t *testing.T) { + spt := newServicePrincipalTokenAuthorizationCode(t) + b, err := json.Marshal(spt) + if err != nil { + t.Fatalf("failed to marshal token: %+v", err) + } + var spt2 *ServicePrincipalToken + err = json.Unmarshal(b, &spt2) + if err != nil { + t.Fatalf("failed to unmarshal token: %+v", err) + } + if !reflect.DeepEqual(spt, spt2) { + t.Fatal("tokens don't match") + } +} + +func TestMarshalInnerToken(t *testing.T) { + spt := newServicePrincipalTokenManual() + tokenJSON, err := spt.MarshalTokenJSON() + if err != nil { + t.Fatalf("failed to marshal token: %+v", err) + } + + testToken := newToken() + testToken.RefreshToken = "refreshtoken" + + testTokenJSON, err := json.Marshal(testToken) + if err != nil { + t.Fatalf("failed to marshal test token: %+v", err) + } + + if !reflect.DeepEqual(tokenJSON, testTokenJSON) { + t.Fatalf("tokens don't match: %s, %s", tokenJSON, testTokenJSON) + } + + var t1 *Token + err = json.Unmarshal(tokenJSON, &t1) + if err != nil { + t.Fatalf("failed to unmarshal token: %+v", err) + } + + if !reflect.DeepEqual(t1, testToken) { + t.Fatalf("tokens don't match: %s, %s", t1, testToken) + } +} + +func newToken() *Token { + return &Token{ + AccessToken: "ASECRETVALUE", + Resource: "https://azure.microsoft.com/", + Type: "Bearer", + } +} + +func newTokenJSON(expiresOn string, resource string) string { + return fmt.Sprintf(`{ + "access_token" : "accessToken", + "expires_in" : "3600", + "expires_on" : "%s", + "not_before" : "%s", + "resource" : "%s", + "token_type" : "Bearer", + "refresh_token": "ABC123" + }`, + 
expiresOn, expiresOn, resource) +} + +func newTokenExpiresIn(expireIn time.Duration) *Token { + return setTokenToExpireIn(newToken(), expireIn) +} + +func newTokenExpiresAt(expireAt time.Time) *Token { + return setTokenToExpireAt(newToken(), expireAt) +} + +func expireToken(t *Token) *Token { + return setTokenToExpireIn(t, 0) +} + +func setTokenToExpireAt(t *Token, expireAt time.Time) *Token { + t.ExpiresIn = "3600" + t.ExpiresOn = strconv.Itoa(int(expireAt.Sub(date.UnixEpoch()).Seconds())) + t.NotBefore = t.ExpiresOn + return t +} + +func setTokenToExpireIn(t *Token, expireIn time.Duration) *Token { + return setTokenToExpireAt(t, time.Now().Add(expireIn)) +} + +func newServicePrincipalToken(callbacks ...TokenRefreshCallback) *ServicePrincipalToken { + spt, _ := NewServicePrincipalToken(TestOAuthConfig, "id", "secret", "resource", callbacks...) + return spt +} + +func newServicePrincipalTokenManual() *ServicePrincipalToken { + token := newToken() + token.RefreshToken = "refreshtoken" + spt, _ := NewServicePrincipalTokenFromManualToken(TestOAuthConfig, "id", "resource", *token) + return spt +} + +func newServicePrincipalTokenCertificate(t *testing.T) *ServicePrincipalToken { + template := x509.Certificate{ + SerialNumber: big.NewInt(0), + Subject: pkix.Name{CommonName: "test"}, + BasicConstraintsValid: true, + } + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + t.Fatal(err) + } + certificateBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &privateKey.PublicKey, privateKey) + if err != nil { + t.Fatal(err) + } + certificate, err := x509.ParseCertificate(certificateBytes) + if err != nil { + t.Fatal(err) + } + + spt, _ := NewServicePrincipalTokenFromCertificate(TestOAuthConfig, "id", certificate, privateKey, "resource") + return spt +} + +func newServicePrincipalTokenUsernamePassword(t *testing.T) *ServicePrincipalToken { + spt, _ := NewServicePrincipalTokenFromUsernamePassword(TestOAuthConfig, "id", "username", "password", "resource") + return spt +} + +func newServicePrincipalTokenAuthorizationCode(t *testing.T) *ServicePrincipalToken { + spt, _ := NewServicePrincipalTokenFromAuthorizationCode(TestOAuthConfig, "id", "clientSecret", "code", "http://redirectUri/getToken", "resource") + return spt +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization.go b/vendor/github.com/Azure/go-autorest/autorest/authorization.go new file mode 100644 index 000000000000..77eff45bddb5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization.go @@ -0,0 +1,259 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/Azure/go-autorest/autorest/adal"
+)
+
+const (
+	bearerChallengeHeader       = "Www-Authenticate"
+	bearer                      = "Bearer"
+	tenantID                    = "tenantID"
+	apiKeyAuthorizerHeader      = "Ocp-Apim-Subscription-Key"
+	bingAPISdkHeader            = "X-BingApis-SDK-Client"
+	golangBingAPISdkHeaderValue = "Go-SDK"
+)
+
+// Authorizer is the interface that provides a PrepareDecorator used to supply request
+// authorization. Most often, the Authorizer decorator runs last so it has access to the full
+// state of the formed HTTP request.
+type Authorizer interface {
+	WithAuthorization() PrepareDecorator
+}
+
+// NullAuthorizer implements a default, "do nothing" Authorizer.
+type NullAuthorizer struct{}
+
+// WithAuthorization returns a PrepareDecorator that does nothing.
+func (na NullAuthorizer) WithAuthorization() PrepareDecorator {
+	return WithNothing()
+}
+
+// APIKeyAuthorizer implements API Key authorization.
+type APIKeyAuthorizer struct {
+	headers         map[string]interface{}
+	queryParameters map[string]interface{}
+}
+
+// NewAPIKeyAuthorizerWithHeaders creates an APIKeyAuthorizer with headers.
+func NewAPIKeyAuthorizerWithHeaders(headers map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(headers, nil)
+}
+
+// NewAPIKeyAuthorizerWithQueryParameters creates an APIKeyAuthorizer with query parameters.
+func NewAPIKeyAuthorizerWithQueryParameters(queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return NewAPIKeyAuthorizer(nil, queryParameters)
+}
+
+// NewAPIKeyAuthorizer creates an APIKeyAuthorizer with the specified headers and query parameters.
+func NewAPIKeyAuthorizer(headers map[string]interface{}, queryParameters map[string]interface{}) *APIKeyAuthorizer {
+	return &APIKeyAuthorizer{headers: headers, queryParameters: queryParameters}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the configured HTTP headers and query parameters to the request.
+func (aka *APIKeyAuthorizer) WithAuthorization() PrepareDecorator {
+	return func(p Preparer) Preparer {
+		return DecoratePreparer(p, WithHeaders(aka.headers), WithQueryParameters(aka.queryParameters))
+	}
+}
+
+// CognitiveServicesAuthorizer implements authorization for Cognitive Services.
+type CognitiveServicesAuthorizer struct {
+	subscriptionKey string
+}
+
+// NewCognitiveServicesAuthorizer creates a CognitiveServicesAuthorizer using the given subscription key.
+func NewCognitiveServicesAuthorizer(subscriptionKey string) *CognitiveServicesAuthorizer {
+	return &CognitiveServicesAuthorizer{subscriptionKey: subscriptionKey}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds the Cognitive Services subscription key
+// and Bing API SDK headers to the request.
+func (csa *CognitiveServicesAuthorizer) WithAuthorization() PrepareDecorator {
+	headers := make(map[string]interface{})
+	headers[apiKeyAuthorizerHeader] = csa.subscriptionKey
+	headers[bingAPISdkHeader] = golangBingAPISdkHeaderValue
+
+	return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization()
+}
+
+// BearerAuthorizer implements bearer token authorization.
+type BearerAuthorizer struct {
+	tokenProvider adal.OAuthTokenProvider
+}
+
+// NewBearerAuthorizer creates a BearerAuthorizer using the given token provider.
+func NewBearerAuthorizer(tp adal.OAuthTokenProvider) *BearerAuthorizer {
+	return &BearerAuthorizer{tokenProvider: tp}
+}
+
+// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose
+// value is "Bearer " followed by the token.
+//
+// By default, the token will be automatically refreshed through the Refresher interface.
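+//
+// A usage sketch (the service principal parameters are placeholders; cfg is
+// assumed to be a valid adal.OAuthConfig, e.g. from adal.NewOAuthConfig):
+//
+//	spt, _ := adal.NewServicePrincipalToken(cfg, "id", "secret", "resource")
+//	ba := NewBearerAuthorizer(spt)
+//	req, _ := Prepare(&http.Request{}, ba.WithAuthorization())
+//	// req now carries "Authorization: Bearer <token>", refreshing the token first if needed.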
+func (ba *BearerAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // the ordering is important here, prefer RefresherWithContext if available + if refresher, ok := ba.tokenProvider.(adal.RefresherWithContext); ok { + err = refresher.EnsureFreshWithContext(r.Context()) + } else if refresher, ok := ba.tokenProvider.(adal.Refresher); ok { + err = refresher.EnsureFresh() + } + if err != nil { + var resp *http.Response + if tokError, ok := err.(adal.TokenRefreshError); ok { + resp = tokError.Response() + } + return r, NewErrorWithError(err, "azure.BearerAuthorizer", "WithAuthorization", resp, + "Failed to refresh the Token for request to %s", r.URL) + } + return Prepare(r, WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", ba.tokenProvider.OAuthToken()))) + } + return r, err + }) + } +} + +// BearerAuthorizerCallbackFunc is the authentication callback signature. +type BearerAuthorizerCallbackFunc func(tenantID, resource string) (*BearerAuthorizer, error) + +// BearerAuthorizerCallback implements bearer authorization via a callback. +type BearerAuthorizerCallback struct { + sender Sender + callback BearerAuthorizerCallbackFunc +} + +// NewBearerAuthorizerCallback creates a bearer authorization callback. The callback +// is invoked when the HTTP request is submitted. +func NewBearerAuthorizerCallback(sender Sender, callback BearerAuthorizerCallbackFunc) *BearerAuthorizerCallback { + if sender == nil { + sender = &http.Client{} + } + return &BearerAuthorizerCallback{sender: sender, callback: callback} +} + +// WithAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose value +// is "Bearer " followed by the token. The BearerAuthorizer is obtained via a user-supplied callback. +// +// By default, the token will be automatically refreshed through the Refresher interface. +func (bacb *BearerAuthorizerCallback) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + // make a copy of the request and remove the body as it's not + // required and avoids us having to create a copy of it. 
+ rCopy := *r + removeRequestBody(&rCopy) + + resp, err := bacb.sender.Do(&rCopy) + if err == nil && resp.StatusCode == 401 { + defer resp.Body.Close() + if hasBearerChallenge(resp) { + bc, err := newBearerChallenge(resp) + if err != nil { + return r, err + } + if bacb.callback != nil { + ba, err := bacb.callback(bc.values[tenantID], bc.values["resource"]) + if err != nil { + return r, err + } + return Prepare(r, ba.WithAuthorization()) + } + } + } + } + return r, err + }) + } +} + +// returns true if the HTTP response contains a bearer challenge +func hasBearerChallenge(resp *http.Response) bool { + authHeader := resp.Header.Get(bearerChallengeHeader) + if len(authHeader) == 0 || strings.Index(authHeader, bearer) < 0 { + return false + } + return true +} + +type bearerChallenge struct { + values map[string]string +} + +func newBearerChallenge(resp *http.Response) (bc bearerChallenge, err error) { + challenge := strings.TrimSpace(resp.Header.Get(bearerChallengeHeader)) + trimmedChallenge := challenge[len(bearer)+1:] + + // challenge is a set of key=value pairs that are comma delimited + pairs := strings.Split(trimmedChallenge, ",") + if len(pairs) < 1 { + err = fmt.Errorf("challenge '%s' contains no pairs", challenge) + return bc, err + } + + bc.values = make(map[string]string) + for i := range pairs { + trimmedPair := strings.TrimSpace(pairs[i]) + pair := strings.Split(trimmedPair, "=") + if len(pair) == 2 { + // remove the enclosing quotes + key := strings.Trim(pair[0], "\"") + value := strings.Trim(pair[1], "\"") + + switch key { + case "authorization", "authorization_uri": + // strip the tenant ID from the authorization URL + asURL, err := url.Parse(value) + if err != nil { + return bc, err + } + bc.values[tenantID] = asURL.Path[1:] + default: + bc.values[key] = value + } + } + } + + return bc, err +} + +// EventGridKeyAuthorizer implements authorization for event grid using key authentication. +type EventGridKeyAuthorizer struct { + topicKey string +} + +// NewEventGridKeyAuthorizer creates a new EventGridKeyAuthorizer +// with the specified topic key. +func NewEventGridKeyAuthorizer(topicKey string) EventGridKeyAuthorizer { + return EventGridKeyAuthorizer{topicKey: topicKey} +} + +// WithAuthorization returns a PrepareDecorator that adds the aeg-sas-key authentication header. +func (egta EventGridKeyAuthorizer) WithAuthorization() PrepareDecorator { + headers := map[string]interface{}{ + "aeg-sas-key": egta.topicKey, + } + return NewAPIKeyAuthorizerWithHeaders(headers).WithAuthorization() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go new file mode 100644 index 000000000000..331d4234f43a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/authorization_test.go @@ -0,0 +1,230 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
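The authorizers defined above are all consumed through the same Prepare pipeline that the tests below exercise. A minimal caller-side sketch, with invented key, URL, and query values (illustrative only, not part of the vendored files):

package main

import (
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	// API-key auth: the decorator attaches both headers and query parameters.
	aka := autorest.NewAPIKeyAuthorizer(
		map[string]interface{}{"Ocp-Apim-Subscription-Key": "my-key"}, // hypothetical header value
		map[string]interface{}{"api-version": "2018-01-01"},          // hypothetical query parameter
	)
	req, err := autorest.Prepare(&http.Request{},
		autorest.WithBaseURL("https://example.azure.com/"),
		aka.WithAuthorization())
	if err != nil {
		panic(err)
	}
	fmt.Println(req.URL) // e.g. https://example.azure.com/?api-version=2018-01-01

	// Event Grid topics instead use a fixed aeg-sas-key header.
	_ = autorest.NewEventGridKeyAuthorizer("topic-key").WithAuthorization()
}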
+ +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + TestTenantID = "TestTenantID" + TestActiveDirectoryEndpoint = "https://login/test.com/" +) + +func TestWithAuthorizer(t *testing.T) { + r1 := mocks.NewRequest() + + na := &NullAuthorizer{} + r2, err := Prepare(r1, + na.WithAuthorization()) + if err != nil { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: NullAuthorizer#WithAuthorization modified the request -- received %v, expected %v", r2, r1) + } +} + +func TestTokenWithAuthorization(t *testing.T) { + token := &adal.Token{ + AccessToken: "TestToken", + Resource: "https://azure.microsoft.com/", + Type: "Bearer", + } + + ba := NewBearerAuthorizer(token) + req, err := Prepare(&http.Request{}, ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", token.AccessToken) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationNoRefresh(t *testing.T) { + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt.SetAutoRefresh(false) + s := mocks.NewSender() + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != fmt.Sprintf("Bearer %s", spt.OAuthToken()) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } +} + +func TestServicePrincipalTokenWithAuthorizationRefresh(t *testing.T) { + + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + refreshed := false + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", func(t adal.Token) error { + refreshed = true + return nil + }) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + + jwt := `{ + "access_token" : "accessToken", + "expires_in" : "3600", + "expires_on" : "test", + "not_before" : "test", + "resource" : "test", + "token_type" : "Bearer" + }` + body := mocks.NewBody(jwt) + resp := mocks.NewResponseWithBodyAndStatus(body, http.StatusOK, "OK") + c := mocks.NewSender() + s := DecorateSender(c, + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return resp, nil + }) + } + })()) + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + req, err := Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey("Authorization")) != 
fmt.Sprintf("Bearer %s", spt.OAuthToken()) { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to set Authorization header") + } + + if !refreshed { + t.Fatal("azure: BearerAuthorizer#WithAuthorization must refresh the token") + } +} + +func TestServicePrincipalTokenWithAuthorizationReturnsErrorIfConnotRefresh(t *testing.T) { + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, TestTenantID) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", "resource", nil) + if err != nil { + t.Fatalf("azure: BearerAuthorizer#WithAuthorization returned an error (%v)", err) + } + + s := mocks.NewSender() + s.AppendResponse(mocks.NewResponseWithStatus("400 Bad Request", http.StatusBadRequest)) + spt.SetSender(s) + + ba := NewBearerAuthorizer(spt) + _, err = Prepare(mocks.NewRequest(), ba.WithAuthorization()) + if err == nil { + t.Fatal("azure: BearerAuthorizer#WithAuthorization failed to return an error when refresh fails") + } +} + +func TestBearerAuthorizerCallback(t *testing.T) { + tenantString := "123-tenantID-456" + resourceString := "https://fake.resource.net" + + s := mocks.NewSender() + resp := mocks.NewResponseWithStatus("401 Unauthorized", http.StatusUnauthorized) + mocks.SetResponseHeader(resp, bearerChallengeHeader, bearer+" \"authorization\"=\"https://fake.net/"+tenantString+"\",\"resource\"=\""+resourceString+"\"") + s.AppendResponse(resp) + + auth := NewBearerAuthorizerCallback(s, func(tenantID, resource string) (*BearerAuthorizer, error) { + if tenantID != tenantString { + t.Fatal("BearerAuthorizerCallback: bad tenant ID") + } + if resource != resourceString { + t.Fatal("BearerAuthorizerCallback: bad resource") + } + + oauthConfig, err := adal.NewOAuthConfig(TestActiveDirectoryEndpoint, tenantID) + if err != nil { + t.Fatalf("azure: NewOAuthConfig returned an error (%v)", err) + } + + spt, err := adal.NewServicePrincipalToken(*oauthConfig, "id", "secret", resource) + if err != nil { + t.Fatalf("azure: NewServicePrincipalToken returned an error (%v)", err) + } + + spt.SetSender(s) + return NewBearerAuthorizer(spt), nil + }) + + _, err := Prepare(mocks.NewRequest(), auth.WithAuthorization()) + if err == nil { + t.Fatal("azure: BearerAuthorizerCallback#WithAuthorization failed to return an error when refresh fails") + } +} + +func TestApiKeyAuthorization(t *testing.T) { + + headers := make(map[string]interface{}) + queryParameters := make(map[string]interface{}) + + dummyAuthHeader := "dummyAuthHeader" + dummyAuthHeaderValue := "dummyAuthHeaderValue" + + dummyAuthQueryParameter := "dummyAuthQueryParameter" + dummyAuthQueryParameterValue := "dummyAuthQueryParameterValue" + + headers[dummyAuthHeader] = dummyAuthHeaderValue + queryParameters[dummyAuthQueryParameter] = dummyAuthQueryParameterValue + + aka := NewAPIKeyAuthorizer(headers, queryParameters) + + req, err := Prepare(mocks.NewRequest(), aka.WithAuthorization()) + + if err != nil { + t.Fatalf("azure: APIKeyAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey(dummyAuthHeader)) != dummyAuthHeaderValue { + t.Fatalf("azure: APIKeyAuthorizer#WithAuthorization failed to set %s header", dummyAuthHeader) + + } else if req.URL.Query().Get(dummyAuthQueryParameter) != dummyAuthQueryParameterValue { + t.Fatalf("azure: APIKeyAuthorizer#WithAuthorization failed to set %s query parameter", dummyAuthQueryParameterValue) + } +} + +func 
TestCognitivesServicesAuthorization(t *testing.T) { + subscriptionKey := "dummyKey" + csa := NewCognitiveServicesAuthorizer(subscriptionKey) + req, err := Prepare(mocks.NewRequest(), csa.WithAuthorization()) + + if err != nil { + t.Fatalf("azure: CognitiveServicesAuthorizer#WithAuthorization returned an error (%v)", err) + } else if req.Header.Get(http.CanonicalHeaderKey(bingAPISdkHeader)) != golangBingAPISdkHeaderValue { + t.Fatalf("azure: CognitiveServicesAuthorizer#WithAuthorization failed to set %s header", bingAPISdkHeader) + } else if req.Header.Get(http.CanonicalHeaderKey(apiKeyAuthorizerHeader)) != subscriptionKey { + t.Fatalf("azure: CognitiveServicesAuthorizer#WithAuthorization failed to set %s header", apiKeyAuthorizerHeader) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest.go b/vendor/github.com/Azure/go-autorest/autorest/autorest.go new file mode 100644 index 000000000000..aafdf021fd6f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest.go @@ -0,0 +1,150 @@ +/* +Package autorest implements an HTTP request pipeline suitable for use across multiple go-routines +and provides the shared routines relied on by AutoRest (see https://github.com/Azure/autorest/) +generated Go code. + +The package breaks sending and responding to HTTP requests into three phases: Preparing, Sending, +and Responding. A typical pattern is: + + req, err := Prepare(&http.Request{}, + token.WithAuthorization()) + + resp, err := Send(req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusInternalServerError), + DoCloseIfError(), + DoRetryForAttempts(5, time.Second)) + + err = Respond(resp, + ByDiscardingBody(), + ByClosing()) + +Each phase relies on decorators to modify and / or manage processing. Decorators may first modify +and then pass the data along, pass the data first and then modify the result, or wrap themselves +around passing the data (such as a logger might do). Decorators run in the order provided. For +example, the following: + + req, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + +will set the URL to: + + https://microsoft.com/a/b/c + +Preparers and Responders may be shared and re-used (assuming the underlying decorators support +sharing and re-use). Performant use is obtained by creating one or more Preparers and Responders +shared among multiple go-routines, and a single Sender shared among multiple sending go-routines, +all bound together by means of input / output channels. + +Decorators hold their passed state within a closure (such as the path components in the example +above). Be careful to share Preparers and Responders only in a context where such held state +applies. For example, it may not make sense to share a Preparer that applies a query string from a +fixed set of values. Similarly, sharing a Responder that reads the response body into a passed +struct (e.g., ByUnmarshallingJson) is likely incorrect. + +Lastly, the Swagger specification (https://swagger.io) that drives AutoRest +(https://github.com/Azure/autorest/) precisely defines two date forms: date and date-time. The +github.com/Azure/go-autorest/autorest/date package provides time.Time derivations to ensure +correct parsing and formatting. + +Errors raised by autorest objects and methods will conform to the autorest.Error interface. + +See the included examples for more detail. For details on the suggested use of this package by +generated clients, see the Client described below. 
+*/ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "net/http" + "time" +) + +const ( + // HeaderLocation specifies the HTTP Location header. + HeaderLocation = "Location" + + // HeaderRetryAfter specifies the HTTP Retry-After header. + HeaderRetryAfter = "Retry-After" +) + +// ResponseHasStatusCode returns true if the status code in the HTTP Response is in the passed set +// and false otherwise. +func ResponseHasStatusCode(resp *http.Response, codes ...int) bool { + if resp == nil { + return false + } + return containsInt(codes, resp.StatusCode) +} + +// GetLocation retrieves the URL from the Location header of the passed response. +func GetLocation(resp *http.Response) string { + return resp.Header.Get(HeaderLocation) +} + +// GetRetryAfter extracts the retry delay from the Retry-After header of the passed response. If +// the header is absent or is malformed, it will return the supplied default delay time.Duration. +func GetRetryAfter(resp *http.Response, defaultDelay time.Duration) time.Duration { + retry := resp.Header.Get(HeaderRetryAfter) + if retry == "" { + return defaultDelay + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + return defaultDelay + } + + return d +} + +// NewPollingRequest allocates and returns a new http.Request to poll for the passed response. +func NewPollingRequest(resp *http.Response, cancel <-chan struct{}) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequest", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare(&http.Request{Cancel: cancel}, + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequest", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} + +// NewPollingRequestWithContext allocates and returns a new http.Request with the specified context to poll for the passed response. 
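Since Retry-After handling is easy to get wrong, here is a small hedged illustration (header values invented) of the behavior implemented by GetRetryAfter above: the header is parsed as a whole number of seconds, and missing or malformed values fall back to the supplied default.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp := &http.Response{Header: http.Header{}}

	resp.Header.Set("Retry-After", "30")
	fmt.Println(autorest.GetRetryAfter(resp, 10*time.Second)) // 30s

	resp.Header.Set("Retry-After", "not-a-number")
	fmt.Println(autorest.GetRetryAfter(resp, 10*time.Second)) // 10s (fallback to the default)
}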
+func NewPollingRequestWithContext(ctx context.Context, resp *http.Response) (*http.Request, error) { + location := GetLocation(resp) + if location == "" { + return nil, NewErrorWithResponse("autorest", "NewPollingRequestWithContext", resp, "Location header missing from response that requires polling") + } + + req, err := Prepare((&http.Request{}).WithContext(ctx), + AsGet(), + WithBaseURL(location)) + if err != nil { + return nil, NewErrorWithError(err, "autorest", "NewPollingRequestWithContext", nil, "Failure creating poll request to %s", location) + } + + return req, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go b/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go new file mode 100644 index 000000000000..467ea438b2c5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/autorest_test.go @@ -0,0 +1,140 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "net/http" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestResponseHasStatusCode(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusAccepted} + if !ResponseHasStatusCode(resp, codes...) { + t.Fatalf("autorest: ResponseHasStatusCode failed to find %v in %v", resp.StatusCode, codes) + } +} + +func TestResponseHasStatusCodeNotPresent(t *testing.T) { + codes := []int{http.StatusOK, http.StatusAccepted} + resp := &http.Response{StatusCode: http.StatusInternalServerError} + if ResponseHasStatusCode(resp, codes...) 
{ + t.Fatalf("autorest: ResponseHasStatusCode unexpectedly found %v in %v", resp.StatusCode, codes) + } +} + +func TestNewPollingRequestDoesNotReturnARequestWhenLocationHeaderIsMissing(t *testing.T) { + resp := mocks.NewResponseWithStatus("500 InternalServerError", http.StatusInternalServerError) + + req, _ := NewPollingRequest(resp, nil) + if req != nil { + t.Fatal("autorest: NewPollingRequest returned an http.Request when the Location header was missing") + } +} + +func TestNewPollingRequestReturnsAnErrorWhenPrepareFails(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) + + _, err := NewPollingRequest(resp, nil) + if err == nil { + t.Fatal("autorest: NewPollingRequest failed to return an error when Prepare fails") + } +} + +func TestNewPollingRequestDoesNotReturnARequestWhenPrepareFails(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderLocation), mocks.TestBadURL) + + req, _ := NewPollingRequest(resp, nil) + if req != nil { + t.Fatal("autorest: NewPollingRequest returned an http.Request when Prepare failed") + } +} + +func TestNewPollingRequestReturnsAGetRequest(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, nil) + if req.Method != "GET" { + t.Fatalf("autorest: NewPollingRequest did not create an HTTP GET request -- actual method %v", req.Method) + } +} + +func TestNewPollingRequestProvidesTheURL(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + req, _ := NewPollingRequest(resp, nil) + if req.URL.String() != mocks.TestURL { + t.Fatalf("autorest: NewPollingRequest did not create an HTTP with the expected URL -- received %v, expected %v", req.URL, mocks.TestURL) + } +} + +func TestGetLocation(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + l := GetLocation(resp) + if len(l) == 0 { + t.Fatalf("autorest: GetLocation failed to return Location header -- expected %v, received %v", mocks.TestURL, l) + } +} + +func TestGetLocationReturnsEmptyStringForMissingLocation(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + l := GetLocation(resp) + if len(l) != 0 { + t.Fatalf("autorest: GetLocation return a value without a Location header -- received %v", l) + } +} + +func TestGetRetryAfter(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != mocks.TestDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the expected delay -- expected %v, received %v", mocks.TestDelay, d) + } +} + +func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMissing(t *testing.T) { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != DefaultPollingDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a missing Retry-After header -- expected %v, received %v", + DefaultPollingDelay, d) + } +} + +func TestGetRetryAfterReturnsDefaultDelayIfRetryHeaderIsMalformed(t *testing.T) { + resp := 
mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + resp.Header.Set(http.CanonicalHeaderKey(HeaderRetryAfter), "a very bad non-integer value") + + d := GetRetryAfter(resp, DefaultPollingDelay) + if d != DefaultPollingDelay { + t.Fatalf("autorest: GetRetryAfter failed to returned the default delay for a malformed Retry-After header -- expected %v, received %v", + DefaultPollingDelay, d) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go new file mode 100644 index 000000000000..7d8c177ade4e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async.go @@ -0,0 +1,957 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + headerAsyncOperation = "Azure-AsyncOperation" +) + +const ( + operationInProgress string = "InProgress" + operationCanceled string = "Canceled" + operationFailed string = "Failed" + operationSucceeded string = "Succeeded" +) + +var pollingCodes = [...]int{http.StatusNoContent, http.StatusAccepted, http.StatusCreated, http.StatusOK} + +// Future provides a mechanism to access the status and results of an asynchronous request. +// Since futures are stateful they should be passed by value to avoid race conditions. +type Future struct { + req *http.Request // legacy + pt pollingTracker +} + +// NewFuture returns a new Future object initialized with the specified request. +// Deprecated: Please use NewFutureFromResponse instead. +func NewFuture(req *http.Request) Future { + return Future{req: req} +} + +// NewFutureFromResponse returns a new Future object initialized +// with the initial response from an asynchronous operation. +func NewFutureFromResponse(resp *http.Response) (Future, error) { + pt, err := createPollingTracker(resp) + if err != nil { + return Future{}, err + } + return Future{pt: pt}, nil +} + +// Response returns the last HTTP response. +func (f Future) Response() *http.Response { + if f.pt == nil { + return nil + } + return f.pt.latestResponse() +} + +// Status returns the last status message of the operation. +func (f Future) Status() string { + if f.pt == nil { + return "" + } + return f.pt.pollingStatus() +} + +// PollingMethod returns the method used to monitor the status of the asynchronous operation. +func (f Future) PollingMethod() PollingMethodType { + if f.pt == nil { + return PollingUnknown + } + return f.pt.pollingMethod() +} + +// Done queries the service to see if the operation has completed. 
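A hedged caller-side sketch of how a Future is typically driven (the helper name is invented; client is an autorest.Client and req is the request that starts the long-running operation):

package lropoll

import (
	"context"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitForLRO is a hypothetical helper: it starts a long-running operation and
// blocks until the operation reaches a terminal state, returning the final response.
func waitForLRO(ctx context.Context, client autorest.Client, req *http.Request) (*http.Response, error) {
	resp, err := client.Do(req) // initial PUT/POST/PATCH/DELETE
	if err != nil {
		return nil, err
	}
	future, err := azure.NewFutureFromResponse(resp)
	if err != nil {
		return nil, err
	}
	// Polls, honoring Retry-After when present, until the operation terminates,
	// the context is cancelled, or the client's retry budget is exhausted.
	if err := future.WaitForCompletionRef(ctx, client); err != nil {
		return nil, err
	}
	return future.GetResult(client) // final GET, when the service exposes one
}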
+func (f *Future) Done(sender autorest.Sender) (bool, error) { + // support for legacy Future implementation + if f.req != nil { + resp, err := sender.Do(f.req) + if err != nil { + return false, err + } + pt, err := createPollingTracker(resp) + if err != nil { + return false, err + } + f.pt = pt + f.req = nil + } + // end legacy + if f.pt == nil { + return false, autorest.NewError("Future", "Done", "future is not initialized") + } + if f.pt.hasTerminated() { + return true, f.pt.pollingError() + } + if err := f.pt.pollForStatus(sender); err != nil { + return false, err + } + if err := f.pt.checkForErrors(); err != nil { + return f.pt.hasTerminated(), err + } + if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil { + return false, err + } + if err := f.pt.initPollingMethod(); err != nil { + return false, err + } + if err := f.pt.updatePollingMethod(); err != nil { + return false, err + } + return f.pt.hasTerminated(), f.pt.pollingError() +} + +// GetPollingDelay returns a duration the application should wait before checking +// the status of the asynchronous request and true; this value is returned from +// the service via the Retry-After response header. If the header wasn't returned +// then the function returns the zero-value time.Duration and false. +func (f Future) GetPollingDelay() (time.Duration, bool) { + if f.pt == nil { + return 0, false + } + resp := f.pt.latestResponse() + if resp == nil { + return 0, false + } + + retry := resp.Header.Get(autorest.HeaderRetryAfter) + if retry == "" { + return 0, false + } + + d, err := time.ParseDuration(retry + "s") + if err != nil { + panic(err) + } + + return d, true +} + +// WaitForCompletion will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +// Deprecated: Please use WaitForCompletionRef() instead. +func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) error { + return f.WaitForCompletionRef(ctx, client) +} + +// WaitForCompletionRef will return when one of the following conditions is met: the long +// running operation has completed, the provided context is cancelled, or the client's +// polling duration has been exceeded. It will retry failed polling attempts based on +// the retry value defined in the client up to the maximum retry attempts. +func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error { + ctx, cancel := context.WithTimeout(ctx, client.PollingDuration) + defer cancel() + done, err := f.Done(client) + for attempts := 0; !done; done, err = f.Done(client) { + if attempts >= client.RetryAttempts { + return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded") + } + // we want delayAttempt to be zero in the non-error case so + // that DelayForBackoff doesn't perform exponential back-off + var delayAttempt int + var delay time.Duration + if err == nil { + // check for Retry-After delay, if not present use the client's polling delay + var ok bool + delay, ok = f.GetPollingDelay() + if !ok { + delay = client.PollingDelay + } + } else { + // there was an error polling for status so perform exponential + // back-off based on the number of attempts using the client's retry + // duration. 
update attempts after delayAttempt to avoid off-by-one. + delayAttempt = attempts + delay = client.RetryDuration + attempts++ + } + // wait until the delay elapses or the context is cancelled + delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done()) + if !delayElapsed { + return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled") + } + } + return err +} + +// MarshalJSON implements the json.Marshaler interface. +func (f Future) MarshalJSON() ([]byte, error) { + return json.Marshal(f.pt) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (f *Future) UnmarshalJSON(data []byte) error { + // unmarshal into JSON object to determine the tracker type + obj := map[string]interface{}{} + err := json.Unmarshal(data, &obj) + if err != nil { + return err + } + if obj["method"] == nil { + return autorest.NewError("Future", "UnmarshalJSON", "missing 'method' property") + } + method := obj["method"].(string) + switch strings.ToUpper(method) { + case http.MethodDelete: + f.pt = &pollingTrackerDelete{} + case http.MethodPatch: + f.pt = &pollingTrackerPatch{} + case http.MethodPost: + f.pt = &pollingTrackerPost{} + case http.MethodPut: + f.pt = &pollingTrackerPut{} + default: + return autorest.NewError("Future", "UnmarshalJSON", "unsupoorted method '%s'", method) + } + // now unmarshal into the tracker + return json.Unmarshal(data, &f.pt) +} + +// PollingURL returns the URL used for retrieving the status of the long-running operation. +func (f Future) PollingURL() string { + if f.pt == nil { + return "" + } + return f.pt.pollingURL() +} + +// GetResult should be called once polling has completed successfully. +// It makes the final GET call to retrieve the resultant payload. +func (f Future) GetResult(sender autorest.Sender) (*http.Response, error) { + if f.pt.finalGetURL() == "" { + // we can end up in this situation if the async operation returns a 200 + // with no polling URLs. in that case return the response which should + // contain the JSON payload (only do this for successful terminal cases). + if lr := f.pt.latestResponse(); lr != nil && f.pt.hasSucceeded() { + return lr, nil + } + return nil, autorest.NewError("Future", "GetResult", "missing URL for retrieving result") + } + req, err := http.NewRequest(http.MethodGet, f.pt.finalGetURL(), nil) + if err != nil { + return nil, err + } + return sender.Do(req) +} + +type pollingTracker interface { + // these methods can differ per tracker + + // checks the response headers and status code to determine the polling mechanism + updatePollingMethod() error + + // checks the response for tracker-specific error conditions + checkForErrors() error + + // returns true if provisioning state should be checked + provisioningStateApplicable() bool + + // methods common to all trackers + + // initializes a tracker's polling URL and method, called for each iteration. + // these values can be overridden by each polling tracker as required. 
+ initPollingMethod() error + + // initializes the tracker's internal state, call this when the tracker is created + initializeState() error + + // makes an HTTP request to check the status of the LRO + pollForStatus(sender autorest.Sender) error + + // updates internal tracker state, call this after each call to pollForStatus + updatePollingState(provStateApl bool) error + + // returns the error response from the service, can be nil + pollingError() error + + // returns the polling method being used + pollingMethod() PollingMethodType + + // returns the state of the LRO as returned from the service + pollingStatus() string + + // returns the URL used for polling status + pollingURL() string + + // returns the URL used for the final GET to retrieve the resource + finalGetURL() string + + // returns true if the LRO is in a terminal state + hasTerminated() bool + + // returns true if the LRO is in a failed terminal state + hasFailed() bool + + // returns true if the LRO is in a successful terminal state + hasSucceeded() bool + + // returns the cached HTTP response after a call to pollForStatus(), can be nil + latestResponse() *http.Response +} + +type pollingTrackerBase struct { + // resp is the last response, either from the submission of the LRO or from polling + resp *http.Response + + // method is the HTTP verb, this is needed for deserialization + Method string `json:"method"` + + // rawBody is the raw JSON response body + rawBody map[string]interface{} + + // denotes if polling is using async-operation or location header + Pm PollingMethodType `json:"pollingMethod"` + + // the URL to poll for status + URI string `json:"pollingURI"` + + // the state of the LRO as returned from the service + State string `json:"lroState"` + + // the URL to GET for the final result + FinalGetURI string `json:"resultURI"` + + // used to hold an error object returned from the service + Err *ServiceError `json:"error,omitempty"` +} + +func (pt *pollingTrackerBase) initializeState() error { + // determine the initial polling state based on response body and/or HTTP status + // code. this is applicable to the initial LRO response, not polling responses! 
+ pt.Method = pt.resp.Request.Method + if err := pt.updateRawBody(); err != nil { + return err + } + switch pt.resp.StatusCode { + case http.StatusOK: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + if pt.hasFailed() { + pt.updateErrorFromResponse() + return pt.pollingError() + } + } else { + pt.State = operationSucceeded + } + case http.StatusCreated: + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationInProgress + } + case http.StatusAccepted: + pt.State = operationInProgress + case http.StatusNoContent: + pt.State = operationSucceeded + default: + pt.State = operationFailed + pt.updateErrorFromResponse() + return pt.pollingError() + } + return pt.initPollingMethod() +} + +func (pt pollingTrackerBase) getProvisioningState() *string { + if pt.rawBody != nil && pt.rawBody["properties"] != nil { + p := pt.rawBody["properties"].(map[string]interface{}) + if ps := p["provisioningState"]; ps != nil { + s := ps.(string) + return &s + } + } + return nil +} + +func (pt *pollingTrackerBase) updateRawBody() error { + pt.rawBody = map[string]interface{}{} + if pt.resp.ContentLength != 0 { + defer pt.resp.Body.Close() + b, err := ioutil.ReadAll(pt.resp.Body) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body") + } + // put the body back so it's available to other callers + pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + if err = json.Unmarshal(b, &pt.rawBody); err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to unmarshal response body") + } + } + return nil +} + +func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error { + req, err := http.NewRequest(http.MethodGet, pt.URI, nil) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request") + } + // attach the context from the original request if available (it will be absent for deserialized futures) + if pt.resp != nil { + req = req.WithContext(pt.resp.Request.Context()) + } + pt.resp, err = sender.Do(req) + if err != nil { + return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request") + } + if autorest.ResponseHasStatusCode(pt.resp, pollingCodes[:]...) { + // reset the service error on success case + pt.Err = nil + err = pt.updateRawBody() + } else { + // check response body for error content + pt.updateErrorFromResponse() + } + return err +} + +// attempts to unmarshal a ServiceError type from the response body. +// if that fails then make a best attempt at creating something meaningful. +// NOTE: this assumes that the async operation has failed. +func (pt *pollingTrackerBase) updateErrorFromResponse() { + var err error + if pt.resp.ContentLength != 0 { + type respErr struct { + ServiceError *ServiceError `json:"error"` + } + re := respErr{} + defer pt.resp.Body.Close() + var b []byte + if b, err = ioutil.ReadAll(pt.resp.Body); err != nil { + goto Default + } + if err = json.Unmarshal(b, &re); err != nil { + goto Default + } + // unmarshalling the error didn't yield anything, try unwrapped error + if re.ServiceError == nil { + err = json.Unmarshal(b, &re.ServiceError) + if err != nil { + goto Default + } + } + // the unmarshaller will ensure re.ServiceError is non-nil + // even if there was no content unmarshalled so check the code. 
+ if re.ServiceError.Code != "" { + pt.Err = re.ServiceError + return + } + } +Default: + se := &ServiceError{ + Code: pt.pollingStatus(), + Message: "The async operation failed.", + } + if err != nil { + se.InnerError = make(map[string]interface{}) + se.InnerError["unmarshalError"] = err.Error() + } + // stick the response body into the error object in hopes + // it contains something useful to help diagnose the failure. + if len(pt.rawBody) > 0 { + se.AdditionalInfo = []map[string]interface{}{ + pt.rawBody, + } + } + pt.Err = se +} + +func (pt *pollingTrackerBase) updatePollingState(provStateApl bool) error { + if pt.Pm == PollingAsyncOperation && pt.rawBody["status"] != nil { + pt.State = pt.rawBody["status"].(string) + } else { + if pt.resp.StatusCode == http.StatusAccepted { + pt.State = operationInProgress + } else if provStateApl { + if ps := pt.getProvisioningState(); ps != nil { + pt.State = *ps + } else { + pt.State = operationSucceeded + } + } else { + return autorest.NewError("pollingTrackerBase", "updatePollingState", "the response from the async operation has an invalid status code") + } + } + // if the operation has failed update the error state + if pt.hasFailed() { + pt.updateErrorFromResponse() + } + return nil +} + +func (pt pollingTrackerBase) pollingError() error { + if pt.Err == nil { + return nil + } + return pt.Err +} + +func (pt pollingTrackerBase) pollingMethod() PollingMethodType { + return pt.Pm +} + +func (pt pollingTrackerBase) pollingStatus() string { + return pt.State +} + +func (pt pollingTrackerBase) pollingURL() string { + return pt.URI +} + +func (pt pollingTrackerBase) finalGetURL() string { + return pt.FinalGetURI +} + +func (pt pollingTrackerBase) hasTerminated() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) || strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) hasFailed() bool { + return strings.EqualFold(pt.State, operationCanceled) || strings.EqualFold(pt.State, operationFailed) +} + +func (pt pollingTrackerBase) hasSucceeded() bool { + return strings.EqualFold(pt.State, operationSucceeded) +} + +func (pt pollingTrackerBase) latestResponse() *http.Response { + return pt.resp +} + +// error checking common to all trackers +func (pt pollingTrackerBase) baseCheckForErrors() error { + // for Azure-AsyncOperations the response body cannot be nil or empty + if pt.Pm == PollingAsyncOperation { + if pt.resp.Body == nil || pt.resp.ContentLength == 0 { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "for Azure-AsyncOperation response body cannot be nil") + } + if pt.rawBody["status"] == nil { + return autorest.NewError("pollingTrackerBase", "baseCheckForErrors", "missing status property in Azure-AsyncOperation response body") + } + } + return nil +} + +// default initialization of polling URL/method. each verb tracker will update this as required. 
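For context on the error-extraction path above, a hedged sketch of the wrapped error envelope it looks for first (the payload is invented; the unwrapped ServiceError form is tried as a fallback, and anything else yields the synthetic error built under the Default label):

package lropoll

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/azure"
)

func exampleEnvelope() {
	// Wrapped ARM-style envelope: {"error": {...}} — mirrors the anonymous
	// respErr struct used by updateErrorFromResponse.
	const failedBody = `{"error": {"code": "Conflict", "message": "resource group is busy"}}`

	var envelope struct {
		ServiceError *azure.ServiceError `json:"error"`
	}
	if err := json.Unmarshal([]byte(failedBody), &envelope); err == nil && envelope.ServiceError != nil {
		fmt.Println(envelope.ServiceError.Code) // Conflict
	}
}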
+func (pt *pollingTrackerBase) initPollingMethod() error { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + return nil + } + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh != "" { + pt.URI = lh + pt.Pm = PollingLocation + return nil + } + // it's ok if we didn't find a polling header, this will be handled elsewhere + return nil +} + +// DELETE + +type pollingTrackerDelete struct { + pollingTrackerBase +} + +func (pt *pollingTrackerDelete) updatePollingMethod() error { + // for 201 the Location header is required + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerDelete", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + } + pt.Pm = PollingLocation + pt.FinalGetURI = pt.URI + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerDelete) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerDelete) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PATCH + +type pollingTrackerPatch struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPatch) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + // note the absense of the "final GET" mechanism for PATCH + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + if ao == "" { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPatch", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } else { + pt.URI = lh + pt.Pm = PollingLocation + } + } + } + return nil +} + +func (pt 
pollingTrackerPatch) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPatch) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// POST + +type pollingTrackerPost struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPost) updatePollingMethod() error { + // 201 requires Location header + if pt.resp.StatusCode == http.StatusCreated { + if lh, err := getURLFromLocationHeader(pt.resp); err != nil { + return err + } else if lh == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "missing Location header in 201 response") + } else { + pt.URI = lh + pt.FinalGetURI = lh + pt.Pm = PollingLocation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. + if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPost", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPost) checkForErrors() error { + return pt.baseCheckForErrors() +} + +func (pt pollingTrackerPost) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusNoContent +} + +// PUT + +type pollingTrackerPut struct { + pollingTrackerBase +} + +func (pt *pollingTrackerPut) updatePollingMethod() error { + // by default we can use the original URL for polling and final GET + if pt.URI == "" { + pt.URI = pt.resp.Request.URL.String() + } + if pt.FinalGetURI == "" { + pt.FinalGetURI = pt.resp.Request.URL.String() + } + if pt.Pm == PollingUnknown { + pt.Pm = PollingRequestURI + } + // for 201 it's permissible for no headers to be returned + if pt.resp.StatusCode == http.StatusCreated { + if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + } + // for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary + if pt.resp.StatusCode == http.StatusAccepted { + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } else if ao != "" { + pt.URI = ao + pt.Pm = PollingAsyncOperation + } + // if the Location header is invalid and we already have a polling URL + // then we don't care if the Location header URL is malformed. 
+ if lh, err := getURLFromLocationHeader(pt.resp); err != nil && pt.URI == "" { + return err + } else if lh != "" { + if ao == "" { + pt.URI = lh + pt.Pm = PollingLocation + } + // when both headers are returned we use the value in the Location header for the final GET + pt.FinalGetURI = lh + } + // make sure a polling URL was found + if pt.URI == "" { + return autorest.NewError("pollingTrackerPut", "updateHeaders", "didn't get any suitable polling URLs in 202 response") + } + } + return nil +} + +func (pt pollingTrackerPut) checkForErrors() error { + err := pt.baseCheckForErrors() + if err != nil { + return err + } + // if there are no LRO headers then the body cannot be empty + ao, err := getURLFromAsyncOpHeader(pt.resp) + if err != nil { + return err + } + lh, err := getURLFromLocationHeader(pt.resp) + if err != nil { + return err + } + if ao == "" && lh == "" && len(pt.rawBody) == 0 { + return autorest.NewError("pollingTrackerPut", "checkForErrors", "the response did not contain a body") + } + return nil +} + +func (pt pollingTrackerPut) provisioningStateApplicable() bool { + return pt.resp.StatusCode == http.StatusOK || pt.resp.StatusCode == http.StatusCreated +} + +// creates a polling tracker based on the verb of the original request +func createPollingTracker(resp *http.Response) (pollingTracker, error) { + var pt pollingTracker + switch strings.ToUpper(resp.Request.Method) { + case http.MethodDelete: + pt = &pollingTrackerDelete{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPatch: + pt = &pollingTrackerPatch{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPost: + pt = &pollingTrackerPost{pollingTrackerBase: pollingTrackerBase{resp: resp}} + case http.MethodPut: + pt = &pollingTrackerPut{pollingTrackerBase: pollingTrackerBase{resp: resp}} + default: + return nil, autorest.NewError("azure", "createPollingTracker", "unsupported HTTP method %s", resp.Request.Method) + } + if err := pt.initializeState(); err != nil { + return pt, err + } + // this initializes the polling header values, we do this during creation in case the + // initial response send us invalid values; this way the API call will return a non-nil + // error (not doing this means the error shows up in Future.Done) + return pt, pt.updatePollingMethod() +} + +// gets the polling URL from the Azure-AsyncOperation header. +// ensures the URL is well-formed and absolute. +func getURLFromAsyncOpHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(headerAsyncOperation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromAsyncOpHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// gets the polling URL from the Location header. +// ensures the URL is well-formed and absolute. +func getURLFromLocationHeader(resp *http.Response) (string, error) { + s := resp.Header.Get(http.CanonicalHeaderKey(autorest.HeaderLocation)) + if s == "" { + return "", nil + } + if !isValidURL(s) { + return "", autorest.NewError("azure", "getURLFromLocationHeader", "invalid polling URL '%s'", s) + } + return s, nil +} + +// verify that the URL is valid and absolute +func isValidURL(s string) bool { + u, err := url.Parse(s) + return err == nil && u.IsAbs() +} + +// DoPollForAsynchronous returns a SendDecorator that polls if the http.Response is for an Azure +// long-running operation. 
It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled via +// the context associated with the http.Request. +// Deprecated: Prefer using Futures to allow for non-blocking async operations. +func DoPollForAsynchronous(delay time.Duration) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + return resp, err + } + if !autorest.ResponseHasStatusCode(resp, pollingCodes[:]...) { + return resp, nil + } + future, err := NewFutureFromResponse(resp) + if err != nil { + return resp, err + } + // retry until either the LRO completes or we receive an error + var done bool + for done, err = future.Done(s); !done && err == nil; done, err = future.Done(s) { + // check for Retry-After delay, if not present use the specified polling delay + if pd, ok := future.GetPollingDelay(); ok { + delay = pd + } + // wait until the delay elapses or the context is cancelled + if delayElapsed := autorest.DelayForBackoff(delay, 0, r.Context().Done()); !delayElapsed { + return future.Response(), + autorest.NewErrorWithError(r.Context().Err(), "azure", "DoPollForAsynchronous", future.Response(), "context has been cancelled") + } + } + return future.Response(), err + }) + } +} + +// PollingMethodType defines a type used for enumerating polling mechanisms. +type PollingMethodType string + +const ( + // PollingAsyncOperation indicates the polling method uses the Azure-AsyncOperation header. + PollingAsyncOperation PollingMethodType = "AsyncOperation" + + // PollingLocation indicates the polling method uses the Location header. + PollingLocation PollingMethodType = "Location" + + // PollingRequestURI indicates the polling method uses the original request URI. + PollingRequestURI PollingMethodType = "RequestURI" + + // PollingUnknown indicates an unknown polling method and is the default value. + PollingUnknown PollingMethodType = "" +) + +// AsyncOpIncompleteError is the type that's returned from a future that has not completed. +type AsyncOpIncompleteError struct { + // FutureType is the name of the type composed of a azure.Future. + FutureType string +} + +// Error returns an error message including the originating type name of the error. +func (e AsyncOpIncompleteError) Error() string { + return fmt.Sprintf("%s: asynchronous operation has not completed", e.FutureType) +} + +// NewAsyncOpIncompleteError creates a new AsyncOpIncompleteError with the specified parameters. +func NewAsyncOpIncompleteError(futureType string) AsyncOpIncompleteError { + return AsyncOpIncompleteError{ + FutureType: futureType, + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go new file mode 100644 index 000000000000..3d5c3213ce10 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/async_test.go @@ -0,0 +1,1155 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "reflect" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestCreateFromInvalidRequestVerb(t *testing.T) { + resp := mocks.NewResponseWithBodyAndStatus(nil, http.StatusOK, "some status") + resp.Request = mocks.NewRequestWithParams(http.MethodGet, mocks.TestURL, nil) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +// DELETE +func TestCreateDeleteTracker201Success(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusCreated, nil) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingLocation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreateDeleteTracker201FailNoLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusCreated, nil) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreateDeleteTracker201FailBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusCreated, nil) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreateDeleteTracker202SuccessAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != "" { + t.Fatal("expected empty GET URL") + } +} + +func TestCreateDeleteTracker202SuccessLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, nil) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingLocation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreateDeleteTracker202SuccessBoth(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if 
pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreateDeleteTracker202SuccessBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != "" { + t.Fatal("expected empty GET URL") + } +} + +func TestCreateDeleteTracker202FailBadAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreateDeleteTracker202FailBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, nil) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +// PATCH + +func TestCreatePatchTracker201Success(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusCreated, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePatchTracker201SuccessNoHeaders(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusCreated, nil) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingRequestURI { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePatchTracker201FailBadAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusCreated, nil) + setAsyncOpHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreatePatchTracker202SuccessAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePatchTracker202SuccessLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusAccepted, nil) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingLocation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong 
final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePatchTracker202SuccessBoth(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePatchTracker202FailBadAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreatePatchTracker202FailBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPatch, nil), http.StatusAccepted, nil) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +// POST + +func TestCreatePostTracker201Success(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusCreated, nil) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingLocation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePostTracker201FailNoHeader(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusCreated, nil) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil err") + } +} + +func TestCreatePostTracker201FailBadHeader(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusCreated, nil) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil err") + } +} + +func TestCreatePostTracker202SuccessAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != "" { + t.Fatal("expected empty final GET URL") + } +} + +func TestCreatePostTracker202SuccessLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusAccepted, nil) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingLocation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URI: %s", pt.finalGetURL()) + } +} + +func TestCreatePostTracker202SuccessBoth(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, 
mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePostTracker202SuccessBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != "" { + t.Fatal("expected empty final GET URL") + } +} + +func TestCreatePostTracker202FailBadAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreatePostTracker202FailBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusAccepted, nil) + _, err := createPollingTracker(resp) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +// PUT + +func TestCreatePutTracker201SuccessAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusCreated, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePutTracker201SuccessNoHeaders(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusCreated, nil) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingRequestURI { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePutTracker201FailBadAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusCreated, nil) + setAsyncOpHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestCreatePutTracker202SuccessAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePutTracker202SuccessLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if 
err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingLocation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePutTracker202SuccessBoth(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, mocks.TestLocationURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestLocationURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePutTracker202SuccessBadLocation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestAzureAsyncURL) + mocks.SetLocationHeader(resp, mocks.TestBadURL) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pt.pollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong polling method: %s", pt.pollingMethod()) + } + if pt.finalGetURL() != mocks.TestURL { + t.Fatalf("wrong final GET URL: %s", pt.finalGetURL()) + } +} + +func TestCreatePutTracker202FailBadAsyncOp(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + setAsyncOpHeader(resp, mocks.TestBadURL) + _, err := createPollingTracker(resp) + if err == nil { + t.Fatal("unexpected nil error") + } +} + +func TestPollPutTrackerSuccessNoHeaders(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + sender := mocks.NewSender() + sender.AppendResponse(newProvisioningStatusResponse("InProgress")) + err = pt.pollForStatus(sender) + if err != nil { + t.Fatalf("failed to poll for status: %v", err) + } + err = pt.checkForErrors() + if err != nil { + t.Fatalf("unexpected error: %v", err) + } +} + +func TestPollPutTrackerFailNoHeadersEmptyBody(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusAccepted, nil) + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + sender := mocks.NewSender() + sender.AppendResponse(mocks.NewResponseWithBodyAndStatus(&mocks.Body{}, http.StatusOK, "status ok")) + err = pt.pollForStatus(sender) + if err != nil { + t.Fatalf("failed to poll for status: %v", err) + } + err = pt.checkForErrors() + if err == nil { + t.Fatalf("unexpected nil error") + } +} + +// errors + +func TestAsyncPollingReturnsWrappedError(t *testing.T) { + resp := newSimpleAsyncResp() + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + sender := mocks.NewSender() + sender.AppendResponse(newOperationResourceErrorResponse("Failed")) + err = pt.pollForStatus(sender) + if err != nil { + t.Fatalf("failed to poll for status: %v", err) + } + err = pt.pollingError() + if err == nil { + t.Fatal("unexpected nil polling error") + } + if se, ok := err.(*ServiceError); !ok { + t.Fatal("incorrect error type") + } else if se.Code == "" { + t.Fatal("empty service error code") + } else if se.Message == "" { + 
t.Fatal("empty service error message") + } +} + +func TestLocationPollingReturnsWrappedError(t *testing.T) { + resp := newSimpleLocationResp() + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + sender := mocks.NewSender() + sender.AppendResponse(newProvisioningStatusErrorResponse("Failed")) + err = pt.pollForStatus(sender) + if err != nil { + t.Fatalf("failed to poll for status: %v", err) + } + err = pt.pollingError() + if err == nil { + t.Fatal("unexpected nil polling error") + } + if se, ok := err.(*ServiceError); !ok { + t.Fatal("incorrect error type") + } else if se.Code == "" { + t.Fatal("empty service error code") + } else if se.Message == "" { + t.Fatal("empty service error message") + } +} + +func TestLocationPollingReturnsUnwrappedError(t *testing.T) { + resp := newSimpleLocationResp() + pt, err := createPollingTracker(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + sender := mocks.NewSender() + sender.AppendResponse(newProvisioningStatusUnwrappedErrorResponse("Failed")) + err = pt.pollForStatus(sender) + if err != nil { + t.Fatalf("failed to poll for status: %v", err) + } + err = pt.pollingError() + if err == nil { + t.Fatal("unexpected nil polling error") + } + if se, ok := err.(*ServiceError); !ok { + t.Fatal("incorrect error type") + } else if se.Code == "" { + t.Fatal("empty service error code") + } else if se.Message == "" { + t.Fatal("empty service error message") + } +} + +// DoPollForAsynchronous (legacy so not many tests plus it builds on Future which has more tests) + +func TestDoPollForAsynchronous_Success(t *testing.T) { + r1 := newSimpleAsyncResp() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + sender := mocks.NewSender() + sender.AppendResponse(r1) + sender.AppendAndRepeatResponse(r2, 2) + sender.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(sender, newAsyncReq(http.MethodPut, nil), DoPollForAsynchronous(time.Millisecond)) + if err != nil { + t.Fatalf("failed to poll for status: %v", err) + } + + if sender.Attempts() < sender.NumResponses() { + t.Fatal("DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + autorest.Respond(r, autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_Failed(t *testing.T) { + r1 := newSimpleAsyncResp() + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + sender := mocks.NewSender() + sender.AppendResponse(r1) + sender.AppendAndRepeatResponse(r2, 2) + sender.AppendAndRepeatResponse(r3, 1) + + r, err := autorest.SendWithSender(sender, newAsyncReq(http.MethodPut, nil), DoPollForAsynchronous(time.Millisecond)) + if err == nil { + t.Fatal("unexpected nil error when polling") + } + if se, ok := err.(*ServiceError); !ok { + t.Fatal("incorrect error type") + } else if se.Code == "" { + t.Fatal("empty service error code") + } else if se.Message == "" { + t.Fatal("empty service error message") + } + + if sender.Attempts() < sender.NumResponses() { + t.Fatalf("DoPollForAsynchronous stopped polling before receiving a terminated OperationResource") + } + autorest.Respond(r, autorest.ByClosing()) +} + +func TestDoPollForAsynchronous_CanBeCanceled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + delay := 5 * time.Second + + r1 := newSimpleAsyncResp() + + sender := mocks.NewSender() + sender.AppendResponse(r1) + 
sender.AppendAndRepeatResponse(newOperationResourceResponse("Busy"), -1) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + end := time.Now() + var err error + go func() { + req := newAsyncReq(http.MethodPut, nil) + req = req.WithContext(ctx) + var r *http.Response + r, err = autorest.SendWithSender(sender, req, DoPollForAsynchronous(delay)) + autorest.Respond(r, autorest.ByClosing()) + end = time.Now() + wg.Done() + }() + cancel() + wg.Wait() + time.Sleep(5 * time.Millisecond) + if err == nil { + t.Fatalf("DoPollForAsynchronous didn't cancel") + } + if end.Sub(start) >= delay { + t.Fatalf("DoPollForAsynchronous failed to cancel") + } +} + +func TestFuture_PollsUntilProvisioningStatusSucceeds(t *testing.T) { + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + sender := mocks.NewSender() + sender.AppendAndRepeatResponse(r2, 2) + sender.AppendResponse(r3) + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + for done, err := future.Done(sender); !done; done, err = future.Done(sender) { + if future.PollingMethod() != PollingAsyncOperation { + t.Fatalf("wrong future polling method: %s", future.PollingMethod()) + } + if err != nil { + t.Fatalf("polling Done failed: %v", err) + } + delay, ok := future.GetPollingDelay() + if !ok { + t.Fatalf("expected Retry-After value") + } + time.Sleep(delay) + } + + if sender.Attempts() < sender.NumResponses() { + t.Fatalf("stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(future.Response(), + autorest.ByClosing()) +} + +func TestFuture_MarshallingSuccess(t *testing.T) { + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + data, err := json.Marshal(future) + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + var future2 Future + err = json.Unmarshal(data, &future2) + if err != nil { + t.Fatalf("failed to unmarshal: %v", err) + } + + if reflect.DeepEqual(future.pt, future2.pt) { + t.Fatalf("marshalling unexpected match") + } + + // these fields don't get marshalled so nil them before deep comparison + future.pt.(*pollingTrackerPut).resp = nil + future.pt.(*pollingTrackerPut).rawBody = nil + if !reflect.DeepEqual(future.pt, future2.pt) { + t.Fatalf("marshalling futures don't match") + } +} + +func TestFuture_MarshallingWithError(t *testing.T) { + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceErrorResponse(operationFailed) + + sender := mocks.NewSender() + sender.AppendAndRepeatResponse(r2, 2) + sender.AppendResponse(r3) + client := autorest.Client{ + PollingDelay: 1 * time.Second, + PollingDuration: autorest.DefaultPollingDuration, + RetryAttempts: autorest.DefaultRetryAttempts, + RetryDuration: 1 * time.Second, + Sender: sender, + } + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + err = future.WaitForCompletion(context.Background(), client) + if err == nil { + t.Fatal("expected non-nil error") + } + + data, err := json.Marshal(future) + if err != nil { + t.Fatalf("failed to marshal: %v", err) + } + + var future2 Future + err = json.Unmarshal(data, &future2) + if err != nil { + t.Fatalf("failed to unmarshal: %v", err) + } + + if reflect.DeepEqual(future.pt, future2.pt) { + t.Fatalf("marshalling unexpected match") + } + + // these fields don't get marshalled so nil them 
before deep comparison + future.pt.(*pollingTrackerPut).resp = nil + future.pt.(*pollingTrackerPut).rawBody = nil + if !reflect.DeepEqual(future.pt, future2.pt) { + t.Fatalf("marshalling futures don't match") + } +} + +func TestFuture_CreateFromFailedOperation(t *testing.T) { + _, err := NewFutureFromResponse(newAsyncResponseWithError(http.MethodPut)) + if err == nil { + t.Fatal("expected non-nil error") + } +} + +func TestFuture_WaitForCompletion(t *testing.T) { + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + sender := mocks.NewSender() + sender.AppendAndRepeatResponse(r2, 2) + sender.AppendResponse(r3) + client := autorest.Client{ + PollingDelay: 1 * time.Second, + PollingDuration: autorest.DefaultPollingDuration, + RetryAttempts: autorest.DefaultRetryAttempts, + RetryDuration: 1 * time.Second, + Sender: sender, + } + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + err = future.WaitForCompletion(context.Background(), client) + if err != nil { + t.Fatalf("WaitForCompletion returned non-nil error") + } + + if sender.Attempts() < sender.NumResponses() { + t.Fatalf("stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(future.Response(), + autorest.ByClosing()) +} + +func TestFuture_WaitForCompletionRef(t *testing.T) { + r2 := newOperationResourceResponse("busy") + r3 := newOperationResourceResponse(operationSucceeded) + + sender := mocks.NewSender() + sender.AppendAndRepeatResponse(r2, 2) + sender.AppendResponse(r3) + client := autorest.Client{ + PollingDelay: 1 * time.Second, + PollingDuration: autorest.DefaultPollingDuration, + RetryAttempts: autorest.DefaultRetryAttempts, + RetryDuration: 1 * time.Second, + Sender: sender, + } + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + err = future.WaitForCompletionRef(context.Background(), client) + if err != nil { + t.Fatalf("WaitForCompletion returned non-nil error") + } + + if sender.Attempts() < sender.NumResponses() { + t.Fatalf("stopped polling before receiving a terminated OperationResource") + } + + autorest.Respond(future.Response(), + autorest.ByClosing()) +} + +func TestFuture_WaitForCompletionTimedOut(t *testing.T) { + r2 := newProvisioningStatusResponse("busy") + + sender := mocks.NewSender() + sender.AppendAndRepeatResponseWithDelay(r2, 1*time.Second, 5) + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + client := autorest.Client{ + PollingDelay: autorest.DefaultPollingDelay, + PollingDuration: 2 * time.Second, + RetryAttempts: autorest.DefaultRetryAttempts, + RetryDuration: 1 * time.Second, + Sender: sender, + } + + err = future.WaitForCompletionRef(context.Background(), client) + if err == nil { + t.Fatalf("WaitForCompletion returned nil error, should have timed out") + } +} + +func TestFuture_WaitForCompletionRetriesExceeded(t *testing.T) { + r1 := newProvisioningStatusResponse("InProgress") + + sender := mocks.NewSender() + sender.AppendResponse(r1) + sender.AppendAndRepeatError(errors.New("transient network failure"), autorest.DefaultRetryAttempts+1) + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + client := autorest.Client{ + PollingDelay: autorest.DefaultPollingDelay, + PollingDuration: 
autorest.DefaultPollingDuration, + RetryAttempts: autorest.DefaultRetryAttempts, + RetryDuration: 100 * time.Millisecond, + Sender: sender, + } + + err = future.WaitForCompletionRef(context.Background(), client) + if err == nil { + t.Fatalf("WaitForCompletion returned nil error, should have errored out") + } +} + +func TestFuture_WaitForCompletionCancelled(t *testing.T) { + r1 := newProvisioningStatusResponse("InProgress") + + sender := mocks.NewSender() + sender.AppendAndRepeatResponseWithDelay(r1, 1*time.Second, 5) + + future, err := NewFutureFromResponse(newSimpleAsyncResp()) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + + client := autorest.Client{ + PollingDelay: autorest.DefaultPollingDelay, + PollingDuration: autorest.DefaultPollingDuration, + RetryAttempts: autorest.DefaultRetryAttempts, + RetryDuration: autorest.DefaultRetryDuration, + Sender: sender, + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(2 * time.Second) + cancel() + }() + + err = future.WaitForCompletionRef(ctx, client) + if err == nil { + t.Fatalf("WaitForCompletion returned nil error, should have been cancelled") + } +} + +func TestFuture_GetResultFromNonAsyncOperation(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusOK, mocks.NewBody(someResource)) + future, err := NewFutureFromResponse(resp) + if err != nil { + t.Fatalf("failed to create tracker: %v", err) + } + if pm := future.PollingMethod(); pm != PollingUnknown { + t.Fatalf("wrong polling method: %s", pm) + } + done, err := future.Done(nil) + if err != nil { + t.Fatalf("failed to check status: %v", err) + } + if !done { + t.Fatal("operation should be done") + } + res, err := future.GetResult(nil) + if err != nil { + t.Fatalf("failed to get result: %v", err) + } + if res != resp { + t.Fatal("result and response don't match") + } +} + +func TestFuture_GetResultNonTerminal(t *testing.T) { + resp := newAsyncResp(newAsyncReq(http.MethodDelete, nil), http.StatusAccepted, mocks.NewBody(fmt.Sprintf(operationResourceFormat, operationInProgress))) + mocks.SetResponseHeader(resp, headerAsyncOperation, mocks.TestAzureAsyncURL) + future, err := NewFutureFromResponse(resp) + if err != nil { + t.Fatalf("failed to create future: %v", err) + } + res, err := future.GetResult(nil) + if err == nil { + t.Fatal("expected non-nil error") + } + if res != nil { + t.Fatal("expected nil result") + } +} + +const ( + operationResourceIllegal = ` + This is not JSON and should fail...badly. + ` + + // returned from LROs that use Location header + pollingStateFormat = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + "provisioningState": "%s" + } + } + ` + + // returned from LROs that use Location header + errorResponse = ` + { + "error" : { + "code" : "InvalidParameter", + "message" : "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix." + } + } + ` + + // returned from LROs that use Location header + unwrappedErrorResponse = ` + { + "code" : "InvalidParameter", + "message" : "tom-service-DISCOVERY-server-base-v1.core.local' is not a valid captured VHD blob name prefix." 
+ } + ` + + // returned from LROs that use Location header + pollingStateEmpty = ` + { + "unused" : { + "somefield" : 42 + }, + "properties" : { + } + } + ` + + // returned from LROs that use Azure-AsyncOperation header + operationResourceFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + "properties" : { + "foo": "bar" + } + } + ` + + // returned from LROs that use Azure-AsyncOperation header + operationResourceErrorFormat = ` + { + "id": "/subscriptions/id/locations/westus/operationsStatus/sameguid", + "name": "sameguid", + "status" : "%s", + "startTime" : "2006-01-02T15:04:05Z", + "endTime" : "2006-01-02T16:04:05Z", + "percentComplete" : 50.00, + "properties" : {}, + "error" : { + "code" : "BadArgument", + "message" : "The provided database 'foo' has an invalid username." + } + } + ` + + // returned from an operation marked as LRO but really isn't + someResource = ` + { + "id": "/subscriptions/guid/resourceGroups/rg/providers/something/else/thing", + "name": "thing", + "type": "Imaginary.type", + "location": "Central US", + "properties": {} + } + ` +) + +// creates an async request with the specified body. +func newAsyncReq(reqMethod string, body *mocks.Body) *http.Request { + return mocks.NewRequestWithParams(reqMethod, mocks.TestURL, body) +} + +// creates an async response with the specified body. +// the req param is the originating LRO request. +func newAsyncResp(req *http.Request, statusCode int, body *mocks.Body) *http.Response { + status := "Unknown" + switch statusCode { + case http.StatusOK, http.StatusNoContent: + status = "Completed" + case http.StatusCreated: + status = "Creating" + case http.StatusAccepted: + status = "In progress" + case http.StatusBadRequest: + status = "Bad request" + } + r := mocks.NewResponseWithBodyAndStatus(body, statusCode, status) + r.Request = req + return r +} + +// creates a simple LRO response, PUT/201 with Azure-AsyncOperation header +func newSimpleAsyncResp() *http.Response { + r := newAsyncResp(newAsyncReq(http.MethodPut, nil), http.StatusCreated, mocks.NewBody(fmt.Sprintf(operationResourceFormat, operationInProgress))) + mocks.SetResponseHeader(r, headerAsyncOperation, mocks.TestAzureAsyncURL) + return r +} + +// creates a simple LRO response, POST/201 with Location header +func newSimpleLocationResp() *http.Response { + r := newAsyncResp(newAsyncReq(http.MethodPost, nil), http.StatusCreated, mocks.NewBody(fmt.Sprintf(pollingStateFormat, operationInProgress))) + mocks.SetResponseHeader(r, autorest.HeaderLocation, mocks.TestLocationURL) + return r +} + +// creates an async response that contains an error (HTTP 400 + error response body) +func newAsyncResponseWithError(reqMethod string) *http.Response { + return newAsyncResp(newAsyncReq(reqMethod, nil), http.StatusBadRequest, mocks.NewBody(errorResponse)) +} + +// creates a LRO polling response using the operation resource format (Azure-AsyncOperation LROs) +func newOperationResourceResponse(status string) *http.Response { + r := mocks.NewResponseWithBodyAndStatus(mocks.NewBody(fmt.Sprintf(operationResourceFormat, status)), http.StatusOK, status) + mocks.SetRetryHeader(r, retryDelay) + return r +} + +// creates a LRO polling error response using the operation resource format (Azure-AsyncOperation LROs) +func newOperationResourceErrorResponse(status string) *http.Response { + return 
mocks.NewResponseWithBodyAndStatus(mocks.NewBody(fmt.Sprintf(operationResourceErrorFormat, status)), http.StatusBadRequest, status) +} + +// creates a LRO polling response using the provisioning state format (Location LROs) +func newProvisioningStatusResponse(status string) *http.Response { + r := mocks.NewResponseWithBodyAndStatus(mocks.NewBody(fmt.Sprintf(pollingStateFormat, status)), http.StatusOK, status) + mocks.SetRetryHeader(r, retryDelay) + return r +} + +// creates a LRO polling error response using the provisioning state format (Location LROs) +func newProvisioningStatusErrorResponse(status string) *http.Response { + return mocks.NewResponseWithBodyAndStatus(mocks.NewBody(errorResponse), http.StatusBadRequest, status) +} + +// creates a LRO polling unwrapped error response using the provisioning state format (Location LROs) +func newProvisioningStatusUnwrappedErrorResponse(status string) *http.Response { + return mocks.NewResponseWithBodyAndStatus(mocks.NewBody(unwrappedErrorResponse), http.StatusBadRequest, status) +} + +// adds the Azure-AsyncOperation header with the specified location to the response +func setAsyncOpHeader(resp *http.Response, location string) { + mocks.SetResponseHeader(resp, http.CanonicalHeaderKey(headerAsyncOperation), location) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go new file mode 100644 index 000000000000..a14b87900fc1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth.go @@ -0,0 +1,444 @@ +package auth + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "crypto/rsa" + "crypto/x509" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "unicode/utf16" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" + "github.com/dimchansky/utfbom" + "golang.org/x/crypto/pkcs12" +) + +// NewAuthorizerFromEnvironment creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. MSI +func NewAuthorizerFromEnvironment() (autorest.Authorizer, error) { + settings, err := getAuthenticationSettings() + if err != nil { + return nil, err + } + + if settings.resource == "" { + settings.resource = settings.environment.ResourceManagerEndpoint + } + + return settings.getAuthorizer() +} + +// NewAuthorizerFromEnvironmentWithResource creates an Authorizer configured from environment variables in the order: +// 1. Client credentials +// 2. Client certificate +// 3. Username password +// 4. 
MSI +func NewAuthorizerFromEnvironmentWithResource(resource string) (autorest.Authorizer, error) { + settings, err := getAuthenticationSettings() + if err != nil { + return nil, err + } + settings.resource = resource + return settings.getAuthorizer() +} + +type settings struct { + tenantID string + clientID string + clientSecret string + certificatePath string + certificatePassword string + username string + password string + envName string + resource string + environment azure.Environment +} + +func getAuthenticationSettings() (s settings, err error) { + s = settings{ + tenantID: os.Getenv("AZURE_TENANT_ID"), + clientID: os.Getenv("AZURE_CLIENT_ID"), + clientSecret: os.Getenv("AZURE_CLIENT_SECRET"), + certificatePath: os.Getenv("AZURE_CERTIFICATE_PATH"), + certificatePassword: os.Getenv("AZURE_CERTIFICATE_PASSWORD"), + username: os.Getenv("AZURE_USERNAME"), + password: os.Getenv("AZURE_PASSWORD"), + envName: os.Getenv("AZURE_ENVIRONMENT"), + resource: os.Getenv("AZURE_AD_RESOURCE"), + } + + if s.envName == "" { + s.environment = azure.PublicCloud + } else { + s.environment, err = azure.EnvironmentFromName(s.envName) + } + return +} + +func (settings settings) getAuthorizer() (autorest.Authorizer, error) { + //1.Client Credentials + if settings.clientSecret != "" { + config := NewClientCredentialsConfig(settings.clientID, settings.clientSecret, settings.tenantID) + config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint + config.Resource = settings.resource + return config.Authorizer() + } + + //2. Client Certificate + if settings.certificatePath != "" { + config := NewClientCertificateConfig(settings.certificatePath, settings.certificatePassword, settings.clientID, settings.tenantID) + config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint + config.Resource = settings.resource + return config.Authorizer() + } + + //3. Username Password + if settings.username != "" && settings.password != "" { + config := NewUsernamePasswordConfig(settings.username, settings.password, settings.clientID, settings.tenantID) + config.AADEndpoint = settings.environment.ActiveDirectoryEndpoint + config.Resource = settings.resource + return config.Authorizer() + } + + // 4. MSI + config := NewMSIConfig() + config.Resource = settings.resource + config.ClientID = settings.clientID + return config.Authorizer() +} + +// NewAuthorizerFromFile creates an Authorizer configured from a configuration file. +func NewAuthorizerFromFile(baseURI string) (autorest.Authorizer, error) { + fileLocation := os.Getenv("AZURE_AUTH_LOCATION") + if fileLocation == "" { + return nil, errors.New("auth file not found. 
Environment variable AZURE_AUTH_LOCATION is not set") + } + + contents, err := ioutil.ReadFile(fileLocation) + if err != nil { + return nil, err + } + + // Auth file might be encoded + decoded, err := decode(contents) + if err != nil { + return nil, err + } + + file := file{} + err = json.Unmarshal(decoded, &file) + if err != nil { + return nil, err + } + + resource, err := getResourceForToken(file, baseURI) + if err != nil { + return nil, err + } + + config, err := adal.NewOAuthConfig(file.ActiveDirectoryEndpoint, file.TenantID) + if err != nil { + return nil, err + } + + spToken, err := adal.NewServicePrincipalToken(*config, file.ClientID, file.ClientSecret, resource) + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// File represents the authentication file +type file struct { + ClientID string `json:"clientId,omitempty"` + ClientSecret string `json:"clientSecret,omitempty"` + SubscriptionID string `json:"subscriptionId,omitempty"` + TenantID string `json:"tenantId,omitempty"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpointUrl,omitempty"` + ResourceManagerEndpoint string `json:"resourceManagerEndpointUrl,omitempty"` + GraphResourceID string `json:"activeDirectoryGraphResourceId,omitempty"` + SQLManagementEndpoint string `json:"sqlManagementEndpointUrl,omitempty"` + GalleryEndpoint string `json:"galleryEndpointUrl,omitempty"` + ManagementEndpoint string `json:"managementEndpointUrl,omitempty"` +} + +func decode(b []byte) ([]byte, error) { + reader, enc := utfbom.Skip(bytes.NewReader(b)) + + switch enc { + case utfbom.UTF16LittleEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.LittleEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + case utfbom.UTF16BigEndian: + u16 := make([]uint16, (len(b)/2)-1) + err := binary.Read(reader, binary.BigEndian, &u16) + if err != nil { + return nil, err + } + return []byte(string(utf16.Decode(u16))), nil + } + return ioutil.ReadAll(reader) +} + +func getResourceForToken(f file, baseURI string) (string, error) { + // Compare dafault base URI from the SDK to the endpoints from the public cloud + // Base URI and token resource are the same string. This func finds the authentication + // file field that matches the SDK base URI. The SDK defines the public cloud + // endpoint as its default base URI + if !strings.HasSuffix(baseURI, "/") { + baseURI += "/" + } + switch baseURI { + case azure.PublicCloud.ServiceManagementEndpoint: + return f.ManagementEndpoint, nil + case azure.PublicCloud.ResourceManagerEndpoint: + return f.ResourceManagerEndpoint, nil + case azure.PublicCloud.ActiveDirectoryEndpoint: + return f.ActiveDirectoryEndpoint, nil + case azure.PublicCloud.GalleryEndpoint: + return f.GalleryEndpoint, nil + case azure.PublicCloud.GraphEndpoint: + return f.GraphResourceID, nil + } + return "", fmt.Errorf("auth: base URI not found in endpoints") +} + +// NewClientCredentialsConfig creates an AuthorizerConfig object configured to obtain an Authorizer through Client Credentials. +// Defaults to Public Cloud and Resource Manager Endpoint. 
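+//
+// A minimal usage sketch, assuming a generated SDK client that exposes an
+// autorest Authorizer field (the IDs shown are placeholders):
+//
+//   config := NewClientCredentialsConfig("client-id", "client-secret", "tenant-id")
+//   authorizer, err := config.Authorizer()
+//   if err != nil {
+//       // handle the error
+//   }
+//   client.Authorizer = authorizer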
+func NewClientCredentialsConfig(clientID string, clientSecret string, tenantID string) ClientCredentialsConfig { + return ClientCredentialsConfig{ + ClientID: clientID, + ClientSecret: clientSecret, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewClientCertificateConfig creates a ClientCertificateConfig object configured to obtain an Authorizer through client certificate. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewClientCertificateConfig(certificatePath string, certificatePassword string, clientID string, tenantID string) ClientCertificateConfig { + return ClientCertificateConfig{ + CertificatePath: certificatePath, + CertificatePassword: certificatePassword, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewUsernamePasswordConfig creates an UsernamePasswordConfig object configured to obtain an Authorizer through username and password. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewUsernamePasswordConfig(username string, password string, clientID string, tenantID string) UsernamePasswordConfig { + return UsernamePasswordConfig{ + Username: username, + Password: password, + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +// NewMSIConfig creates an MSIConfig object configured to obtain an Authorizer through MSI. +func NewMSIConfig() MSIConfig { + return MSIConfig{ + Resource: azure.PublicCloud.ResourceManagerEndpoint, + } +} + +// NewDeviceFlowConfig creates a DeviceFlowConfig object configured to obtain an Authorizer through device flow. +// Defaults to Public Cloud and Resource Manager Endpoint. +func NewDeviceFlowConfig(clientID string, tenantID string) DeviceFlowConfig { + return DeviceFlowConfig{ + ClientID: clientID, + TenantID: tenantID, + Resource: azure.PublicCloud.ResourceManagerEndpoint, + AADEndpoint: azure.PublicCloud.ActiveDirectoryEndpoint, + } +} + +//AuthorizerConfig provides an authorizer from the configuration provided. +type AuthorizerConfig interface { + Authorizer() (autorest.Authorizer, error) +} + +// ClientCredentialsConfig provides the options to get a bearer authorizer from client credentials. +type ClientCredentialsConfig struct { + ClientID string + ClientSecret string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from client credentials. +func (ccc ClientCredentialsConfig) Authorizer() (autorest.Authorizer, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + if err != nil { + return nil, err + } + + spToken, err := adal.NewServicePrincipalToken(*oauthConfig, ccc.ClientID, ccc.ClientSecret, ccc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from client credentials: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// ClientCertificateConfig provides the options to get a bearer authorizer from a client certificate. +type ClientCertificateConfig struct { + ClientID string + CertificatePath string + CertificatePassword string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets an authorizer object from client certificate. 
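+//
+// The file at CertificatePath is expected to be a PKCS#12 bundle containing
+// the certificate and its RSA private key, decrypted with CertificatePassword.
+// A minimal usage sketch (the path, password and IDs are placeholders):
+//
+//   config := NewClientCertificateConfig("/path/to/cert.pfx", "pfx-password", "client-id", "tenant-id")
+//   authorizer, err := config.Authorizer()
+//   if err != nil {
+//       // handle the error
+//   }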
+func (ccc ClientCertificateConfig) Authorizer() (autorest.Authorizer, error) { + oauthConfig, err := adal.NewOAuthConfig(ccc.AADEndpoint, ccc.TenantID) + + certData, err := ioutil.ReadFile(ccc.CertificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", ccc.CertificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, ccc.CertificatePassword) + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spToken, err := adal.NewServicePrincipalTokenFromCertificate(*oauthConfig, ccc.ClientID, certificate, rsaPrivateKey, ccc.Resource) + + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from certificate auth: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// DeviceFlowConfig provides the options to get a bearer authorizer using device flow authentication. +type DeviceFlowConfig struct { + ClientID string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from device flow. +func (dfc DeviceFlowConfig) Authorizer() (autorest.Authorizer, error) { + oauthClient := &autorest.Client{} + oauthConfig, err := adal.NewOAuthConfig(dfc.AADEndpoint, dfc.TenantID) + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, dfc.ClientID, dfc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + + log.Println(*deviceCode.Message) + + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + + spToken, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, dfc.ClientID, dfc.Resource, *token) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +// UsernamePasswordConfig provides the options to get a bearer authorizer from a username and a password. +type UsernamePasswordConfig struct { + ClientID string + Username string + Password string + TenantID string + AADEndpoint string + Resource string +} + +// Authorizer gets the authorizer from a username and a password. +func (ups UsernamePasswordConfig) Authorizer() (autorest.Authorizer, error) { + + oauthConfig, err := adal.NewOAuthConfig(ups.AADEndpoint, ups.TenantID) + + spToken, err := adal.NewServicePrincipalTokenFromUsernamePassword(*oauthConfig, ups.ClientID, ups.Username, ups.Password, ups.Resource) + + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from username and password auth: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// MSIConfig provides the options to get a bearer authorizer through MSI. +type MSIConfig struct { + Resource string + ClientID string +} + +// Authorizer gets the authorizer from MSI. 
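+//
+// The token is requested from the VM's MSI endpoint (via adal.GetMSIVMEndpoint)
+// and scoped to Resource. A minimal usage sketch (the resource URI is a
+// placeholder for whichever service is being called):
+//
+//   config := NewMSIConfig()
+//   config.Resource = "https://management.azure.com/"
+//   authorizer, err := config.Authorizer()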
+func (mc MSIConfig) Authorizer() (autorest.Authorizer, error) { + msiEndpoint, err := adal.GetMSIVMEndpoint() + if err != nil { + return nil, err + } + + spToken, err := adal.NewServicePrincipalTokenFromMSI(msiEndpoint, mc.Resource) + if err != nil { + return nil, fmt.Errorf("failed to get oauth token from MSI: %v", err) + } + + return autorest.NewBearerAuthorizer(spToken), nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth_test.go new file mode 100644 index 000000000000..667961bdeae4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/auth/auth_test.go @@ -0,0 +1,108 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package auth + +import ( + "encoding/json" + "io/ioutil" + "os" + "path/filepath" + "reflect" + "testing" +) + +var ( + expectedFile = file{ + ClientID: "client-id-123", + ClientSecret: "client-secret-456", + SubscriptionID: "sub-id-789", + TenantID: "tenant-id-123", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com", + ResourceManagerEndpoint: "https://management.azure.com/", + GraphResourceID: "https://graph.windows.net/", + SQLManagementEndpoint: "https://management.core.windows.net:8443/", + GalleryEndpoint: "https://gallery.azure.com/", + ManagementEndpoint: "https://management.core.windows.net/", + } +) + +func TestNewAuthorizerFromFile(t *testing.T) { + os.Setenv("AZURE_AUTH_LOCATION", filepath.Join(getCredsPath(), "credsutf16le.json")) + authorizer, err := NewAuthorizerFromFile("https://management.azure.com") + if err != nil || authorizer == nil { + t.Logf("NewAuthorizerFromFile failed, got error %v", err) + t.Fail() + } +} + +func TestNewAuthorizerFromEnvironment(t *testing.T) { + os.Setenv("AZURE_TENANT_ID", expectedFile.TenantID) + os.Setenv("AZURE_CLIENT_ID", expectedFile.ClientID) + os.Setenv("AZURE_CLIENT_SECRET", expectedFile.ClientSecret) + authorizer, err := NewAuthorizerFromEnvironment() + + if err != nil || authorizer == nil { + t.Logf("NewAuthorizerFromFile failed, got error %v", err) + t.Fail() + } +} + +func TestDecodeAndUnmarshal(t *testing.T) { + tests := []string{ + "credsutf8.json", + "credsutf16le.json", + "credsutf16be.json", + } + creds := getCredsPath() + for _, test := range tests { + b, err := ioutil.ReadFile(filepath.Join(creds, test)) + if err != nil { + t.Logf("error reading file '%s': %s", test, err) + t.Fail() + } + decoded, err := decode(b) + if err != nil { + t.Logf("error decoding file '%s': %s", test, err) + t.Fail() + } + var got file + err = json.Unmarshal(decoded, &got) + if err != nil { + t.Logf("error unmarshaling file '%s': %s", test, err) + t.Fail() + } + if !reflect.DeepEqual(expectedFile, got) { + t.Logf("unmarshaled map expected %v, got %v", expectedFile, got) + t.Fail() + } + } +} + +func getCredsPath() string { + gopath := os.Getenv("GOPATH") + return filepath.Join(gopath, "src", "github.com", "Azure", "go-autorest", "testdata") +} + +func 
areMapsEqual(a, b map[string]string) bool { + if len(a) != len(b) { + return false + } + for k := range a { + if a[k] != b[k] { + return false + } + } + return true +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go new file mode 100644 index 000000000000..3a0a439ff930 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure.go @@ -0,0 +1,326 @@ +// Package azure provides Azure-specific implementations used with AutoRest. +// See the included examples for more detail. +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "regexp" + "strconv" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +const ( + // HeaderClientID is the Azure extension header to set a user-specified request ID. + HeaderClientID = "x-ms-client-request-id" + + // HeaderReturnClientID is the Azure extension header to set if the user-specified request ID + // should be included in the response. + HeaderReturnClientID = "x-ms-return-client-request-id" + + // HeaderRequestID is the Azure extension header of the service generated request ID returned + // in the response. + HeaderRequestID = "x-ms-request-id" +) + +// ServiceError encapsulates the error response from an Azure service. +// It adhears to the OData v4 specification for error responses. +type ServiceError struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` +} + +func (se ServiceError) Error() string { + result := fmt.Sprintf("Code=%q Message=%q", se.Code, se.Message) + + if se.Target != nil { + result += fmt.Sprintf(" Target=%q", *se.Target) + } + + if se.Details != nil { + d, err := json.Marshal(se.Details) + if err != nil { + result += fmt.Sprintf(" Details=%v", se.Details) + } + result += fmt.Sprintf(" Details=%v", string(d)) + } + + if se.InnerError != nil { + d, err := json.Marshal(se.InnerError) + if err != nil { + result += fmt.Sprintf(" InnerError=%v", se.InnerError) + } + result += fmt.Sprintf(" InnerError=%v", string(d)) + } + + if se.AdditionalInfo != nil { + d, err := json.Marshal(se.AdditionalInfo) + if err != nil { + result += fmt.Sprintf(" AdditionalInfo=%v", se.AdditionalInfo) + } + result += fmt.Sprintf(" AdditionalInfo=%v", string(d)) + } + + return result +} + +// UnmarshalJSON implements the json.Unmarshaler interface for the ServiceError type. +func (se *ServiceError) UnmarshalJSON(b []byte) error { + // per the OData v4 spec the details field must be an array of JSON objects. + // unfortunately not all services adhear to the spec and just return a single + // object instead of an array with one object. so we have to perform some + // shenanigans to accommodate both cases. 
+ // http://docs.oasis-open.org/odata/odata-json-format/v4.0/os/odata-json-format-v4.0-os.html#_Toc372793091 + + type serviceError1 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details []map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + type serviceError2 struct { + Code string `json:"code"` + Message string `json:"message"` + Target *string `json:"target"` + Details map[string]interface{} `json:"details"` + InnerError map[string]interface{} `json:"innererror"` + AdditionalInfo []map[string]interface{} `json:"additionalInfo"` + } + + se1 := serviceError1{} + err := json.Unmarshal(b, &se1) + if err == nil { + se.populate(se1.Code, se1.Message, se1.Target, se1.Details, se1.InnerError, se1.AdditionalInfo) + return nil + } + + se2 := serviceError2{} + err = json.Unmarshal(b, &se2) + if err == nil { + se.populate(se2.Code, se2.Message, se2.Target, nil, se2.InnerError, se2.AdditionalInfo) + se.Details = append(se.Details, se2.Details) + return nil + } + return err +} + +func (se *ServiceError) populate(code, message string, target *string, details []map[string]interface{}, inner map[string]interface{}, additional []map[string]interface{}) { + se.Code = code + se.Message = message + se.Target = target + se.Details = details + se.InnerError = inner + se.AdditionalInfo = additional +} + +// RequestError describes an error response returned by Azure service. +type RequestError struct { + autorest.DetailedError + + // The error returned by the Azure service. + ServiceError *ServiceError `json:"error"` + + // The request id (from the x-ms-request-id-header) of the request. + RequestID string +} + +// Error returns a human-friendly error message from service error. +func (e RequestError) Error() string { + return fmt.Sprintf("autorest/azure: Service returned an error. Status=%v %v", + e.StatusCode, e.ServiceError) +} + +// IsAzureError returns true if the passed error is an Azure Service error; false otherwise. +func IsAzureError(e error) bool { + _, ok := e.(*RequestError) + return ok +} + +// Resource contains details about an Azure resource. +type Resource struct { + SubscriptionID string + ResourceGroup string + Provider string + ResourceType string + ResourceName string +} + +// ParseResourceID parses a resource ID into a ResourceDetails struct. +// See https://docs.microsoft.com/en-us/azure/azure-resource-manager/resource-group-template-functions-resource#return-value-4. +func ParseResourceID(resourceID string) (Resource, error) { + + const resourceIDPatternText = `(?i)subscriptions/(.+)/resourceGroups/(.+)/providers/(.+?)/(.+?)/(.+)` + resourceIDPattern := regexp.MustCompile(resourceIDPatternText) + match := resourceIDPattern.FindStringSubmatch(resourceID) + + if len(match) == 0 { + return Resource{}, fmt.Errorf("parsing failed for %s. Invalid resource Id format", resourceID) + } + + v := strings.Split(match[5], "/") + resourceName := v[len(v)-1] + + result := Resource{ + SubscriptionID: match[1], + ResourceGroup: match[2], + Provider: match[3], + ResourceType: match[4], + ResourceName: resourceName, + } + + return result, nil +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. 
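+//
+// A minimal usage sketch, assuming resp is the *http.Response that produced
+// the failure (the package, method and message values are placeholders):
+//
+//   if err != nil {
+//       return NewErrorWithError(err, "mypackage", "CreateThing", resp, "creating %q failed", name)
+//   }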
+func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) RequestError { + if v, ok := original.(*RequestError); ok { + return *v + } + + statusCode := autorest.UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + return RequestError{ + DetailedError: autorest.DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + }, + } +} + +// WithReturningClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is the passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). It also sets the x-ms-return-client-request-id +// header to true such that UUID accompanies the http.Response. +func WithReturningClientID(uuid string) autorest.PrepareDecorator { + preparer := autorest.CreatePreparer( + WithClientID(uuid), + WithReturnClientID(true)) + + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err != nil { + return r, err + } + return preparer.Prepare(r) + }) + } +} + +// WithClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-client-request-id whose value is passed, undecorated UUID (e.g., +// "0F39878C-5F76-4DB8-A25D-61D2C193C3CA"). +func WithClientID(uuid string) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderClientID, uuid) +} + +// WithReturnClientID returns a PrepareDecorator that adds an HTTP extension header of +// x-ms-return-client-request-id whose boolean value indicates if the value of the +// x-ms-client-request-id header should be included in the http.Response. +func WithReturnClientID(b bool) autorest.PrepareDecorator { + return autorest.WithHeader(HeaderReturnClientID, strconv.FormatBool(b)) +} + +// ExtractClientID extracts the client identifier from the x-ms-client-request-id header set on the +// http.Request sent to the service (and returned in the http.Response) +func ExtractClientID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderClientID, resp) +} + +// ExtractRequestID extracts the Azure server generated request identifier from the +// x-ms-request-id header. +func ExtractRequestID(resp *http.Response) string { + return autorest.ExtractHeaderValue(HeaderRequestID, resp) +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an +// azure.RequestError by reading the response body unless the response HTTP status code +// is among the set passed. +// +// If there is a chance service may return responses other than the Azure error +// format and the response cannot be parsed into an error, a decoding error will +// be returned containing the response body. In any case, the Responder will +// return an error if the status code is not satisfied. +// +// If this Responder returns an error, the response body will be replaced with +// an in-memory reader, which needs no further closing. +func WithErrorUnlessStatusCode(codes ...int) autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !autorest.ResponseHasStatusCode(resp, codes...) { + var e RequestError + defer resp.Body.Close() + + // Copy and replace the Body in case it does not contain an error object. 
+ // This will leave the Body available to the caller. + b, decodeErr := autorest.CopyAndDecode(autorest.EncodedAsJSON, resp.Body, &e) + resp.Body = ioutil.NopCloser(&b) + if decodeErr != nil { + return fmt.Errorf("autorest/azure: error response cannot be parsed: %q error: %v", b.String(), decodeErr) + } + if e.ServiceError == nil { + // Check if error is unwrapped ServiceError + if err := json.Unmarshal(b.Bytes(), &e.ServiceError); err != nil { + return err + } + } + if e.ServiceError.Message == "" { + // if we're here it means the returned error wasn't OData v4 compliant. + // try to unmarshal the body as raw JSON in hopes of getting something. + rawBody := map[string]interface{}{} + if err := json.Unmarshal(b.Bytes(), &rawBody); err != nil { + return err + } + e.ServiceError = &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + } + if len(rawBody) > 0 { + e.ServiceError.Details = []map[string]interface{}{rawBody} + } + } + e.Response = resp + e.RequestID = ExtractRequestID(resp) + if e.StatusCode == nil { + e.StatusCode = resp.StatusCode + } + err = &e + } + return err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go new file mode 100644 index 000000000000..a99ccae7faa6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/azure_test.go @@ -0,0 +1,668 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strconv" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + headerAuthorization = "Authorization" + longDelay = 5 * time.Second + retryDelay = 10 * time.Millisecond + testLogPrefix = "azure:" +) + +// Use a Client Inspector to set the request identifier. 
+func ExampleWithClientID() { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL("https://microsoft.com/a/b/c/")) + + c := autorest.Client{Sender: mocks.NewSender()} + c.RequestInspector = WithReturningClientID(uuid) + + autorest.SendWithSender(c, req) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderClientID, req.Header.Get(HeaderClientID)) + fmt.Printf("Inspector added the %s header with the value %s\n", + HeaderReturnClientID, req.Header.Get(HeaderReturnClientID)) + // Output: + // Inspector added the x-ms-client-request-id header with the value 71FDB9F4-5E49-4C12-B266-DE7B4FD999A6 + // Inspector added the x-ms-return-client-request-id header with the value true +} + +func TestWithReturningClientIDReturnsError(t *testing.T) { + var errIn error + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + _, errOut := autorest.Prepare(&http.Request{}, + withErrorPrepareDecorator(&errIn), + WithReturningClientID(uuid)) + + if errOut == nil || errIn != errOut { + t.Fatalf("azure: WithReturningClientID failed to exit early when receiving an error -- expected (%v), received (%v)", + errIn, errOut) + } +} + +func TestWithClientID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + req, _ := autorest.Prepare(&http.Request{}, + WithClientID(uuid)) + + if req.Header.Get(HeaderClientID) != uuid { + t.Fatalf("azure: WithClientID failed to set %s -- expected %s, received %s", + HeaderClientID, uuid, req.Header.Get(HeaderClientID)) + } +} + +func TestWithReturnClientID(t *testing.T) { + b := false + req, _ := autorest.Prepare(&http.Request{}, + WithReturnClientID(b)) + + if req.Header.Get(HeaderReturnClientID) != strconv.FormatBool(b) { + t.Fatalf("azure: WithReturnClientID failed to set %s -- expected %s, received %s", + HeaderClientID, strconv.FormatBool(b), req.Header.Get(HeaderClientID)) + } +} + +func TestExtractClientID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderClientID, uuid) + + if ExtractClientID(resp) != uuid { + t.Fatalf("azure: ExtractClientID failed to extract the %s -- expected %s, received %s", + HeaderClientID, uuid, ExtractClientID(resp)) + } +} + +func TestExtractRequestID(t *testing.T) { + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + resp := mocks.NewResponse() + mocks.SetResponseHeader(resp, HeaderRequestID, uuid) + + if ExtractRequestID(resp) != uuid { + t.Fatalf("azure: ExtractRequestID failed to extract the %s -- expected %s, received %s", + HeaderRequestID, uuid, ExtractRequestID(resp)) + } +} + +func TestIsAzureError_ReturnsTrueForAzureError(t *testing.T) { + if !IsAzureError(&RequestError{}) { + t.Fatalf("azure: IsAzureError failed to return true for an Azure Service error") + } +} + +func TestIsAzureError_ReturnsFalseForNonAzureError(t *testing.T) { + if IsAzureError(fmt.Errorf("An Error")) { + t.Fatalf("azure: IsAzureError return true for an non-Azure Service error") + } +} + +func TestNewErrorWithError_UsesReponseStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("Error"), "packageType", "method", mocks.NewResponseWithStatus("Forbidden", http.StatusForbidden), "message") + if e.StatusCode != http.StatusForbidden { + t.Fatalf("azure: NewErrorWithError failed to use the Status Code of the passed Response -- expected %v, received %v", http.StatusForbidden, e.StatusCode) + } +} + +func TestNewErrorWithError_ReturnsUnwrappedError(t *testing.T) { + e1 := 
RequestError{} + e1.ServiceError = &ServiceError{Code: "42", Message: "A Message"} + e1.StatusCode = 200 + e1.RequestID = "A RequestID" + e2 := NewErrorWithError(&e1, "packageType", "method", nil, "message") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("azure: NewErrorWithError wrapped an RequestError -- expected %T, received %T", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(RequestError); !ok { + t.Fatalf("azure: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestWithErrorUnlessStatusCode_NotAnAzureError(t *testing.T) { + body := ` + + IIS Error page + + Some non-JSON error page + ` + r := mocks.NewResponseWithContent(body) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusBadRequest + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + ok, _ := err.(*RequestError) + if ok != nil { + t.Fatalf("azure: azure.RequestError returned from malformed response: %v", err) + } + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != body { + t.Fatalf("response body is wrong. got=%q exptected=%q", string(b), body) + } +} + +func TestWithErrorUnlessStatusCode_FoundAzureErrorWithoutDetails(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now." + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := "autorest/azure: Service returned an error. Status=500 Code=\"InternalError\" Message=\"Azure is having trouble right now.\"" + if !reflect.DeepEqual(expected, azErr.Error()) { + t.Fatalf("azure: service error is not unmarshaled properly.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%d Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. 
got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_FoundAzureFullError(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Azure is having trouble right now.", + "target": "target1", + "details": [{"code": "conflict1", "message":"error message1"}, + {"code": "conflict2", "message":"error message2"}], + "innererror": { "customKey": "customValue" }, + "additionalInfo": [{"type": "someErrorType", "info": {"someProperty": "someValue"}}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + if expected := "InternalError"; azErr.ServiceError.Code != expected { + t.Fatalf("azure: wrong error code. expected=%q; got=%q", expected, azErr.ServiceError.Code) + } + + if azErr.ServiceError.Message == "" { + t.Fatalf("azure: error message is not unmarshaled properly") + } + + if *azErr.ServiceError.Target == "" { + t.Fatalf("azure: error target is not unmarshaled properly") + } + + d, _ := json.Marshal(azErr.ServiceError.Details) + if string(d) != `[{"code":"conflict1","message":"error message1"},{"code":"conflict2","message":"error message2"}]` { + t.Fatalf("azure: error details is not unmarshaled properly") + } + + i, _ := json.Marshal(azErr.ServiceError.InnerError) + if string(i) != `{"customKey":"customValue"}` { + t.Fatalf("azure: inner error is not unmarshaled properly") + } + + a, _ := json.Marshal(azErr.ServiceError.AdditionalInfo) + if string(a) != `[{"info":{"someProperty":"someValue"},"type":"someErrorType"}]` { + t.Fatalf("azure: error additional info is not unmarshaled properly") + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. 
got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_NoAzureError(t *testing.T) { + j := `{ + "Status":"NotFound" + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("azure: returned error is not azure.RequestError: %T", err) + } + + expected := &ServiceError{ + Code: "Unknown", + Message: "Unknown service error", + Details: []map[string]interface{}{ + {"Status": "NotFound"}, + }, + } + + if !reflect.DeepEqual(expected, azErr.ServiceError) { + t.Fatalf("azure: service error is not unmarshaled properly. expected=%q\ngot=%q", expected, azErr.ServiceError) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Fatalf("azure: got wrong StatusCode=%v Expected=%d", azErr.StatusCode, expected) + } + if expected := uuid; azErr.RequestID != expected { + t.Fatalf("azure: wrong request ID in error. expected=%q; got=%q", expected, azErr.RequestID) + } + + _ = azErr.Error() + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatal(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestWithErrorUnlessStatusCode_UnwrappedError(t *testing.T) { + j := `{ + "code": "InternalError", + "message": "Azure is having trouble right now.", + "target": "target1", + "details": [{"code": "conflict1", "message":"error message1"}, + {"code": "conflict2", "message":"error message2"}], + "innererror": { "customKey": "customValue" }, + "additionalInfo": [{"type": "someErrorType", "info": {"someProperty": "someValue"}}] + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatal("azure: returned nil error for proper error response") + } + + azErr, ok := err.(*RequestError) + if !ok { + t.Fatalf("returned error is not azure.RequestError: %T", err) + } + + if expected := http.StatusInternalServerError; azErr.StatusCode != expected { + t.Logf("Incorrect StatusCode got: %v want: %d", azErr.StatusCode, expected) + t.Fail() + } + + if expected := "Azure is having trouble right now."; azErr.Message != expected { + t.Logf("Incorrect Message\n\tgot: %q\n\twant: %q", azErr.Message, expected) + t.Fail() + } + + if expected := uuid; azErr.RequestID != expected { + t.Logf("Incorrect request ID\n\tgot: %q\n\twant: %q", azErr.RequestID, expected) + t.Fail() + } + + if azErr.ServiceError == nil { + t.Logf("`ServiceError` was nil when it shouldn't have been.") + t.Fail() + } + + if expected := "target1"; *azErr.ServiceError.Target != expected { + t.Logf("Incorrect Target\n\tgot: %q\n\twant: %q", *azErr.ServiceError.Target, expected) + t.Fail() + } + + expectedServiceErrorDetails := `[{"code":"conflict1","message":"error 
message1"},{"code":"conflict2","message":"error message2"}]` + if azErr.ServiceError.Details == nil { + t.Logf("`ServiceError.Details` was nil when it should have been %q", expectedServiceErrorDetails) + t.Fail() + } else if details, _ := json.Marshal(azErr.ServiceError.Details); expectedServiceErrorDetails != string(details) { + t.Logf("Error details was not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(details), expectedServiceErrorDetails) + t.Fail() + } + + expectedServiceErrorInnerError := `{"customKey":"customValue"}` + if azErr.ServiceError.InnerError == nil { + t.Logf("`ServiceError.InnerError` was nil when it should have been %q", expectedServiceErrorInnerError) + t.Fail() + } else if innerError, _ := json.Marshal(azErr.ServiceError.InnerError); expectedServiceErrorInnerError != string(innerError) { + t.Logf("Inner error was not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(innerError), expectedServiceErrorInnerError) + t.Fail() + } + + expectedServiceErrorAdditionalInfo := `[{"info":{"someProperty":"someValue"},"type":"someErrorType"}]` + if azErr.ServiceError.AdditionalInfo == nil { + t.Logf("`ServiceError.AdditionalInfo` was nil when it should have been %q", expectedServiceErrorAdditionalInfo) + t.Fail() + } else if additionalInfo, _ := json.Marshal(azErr.ServiceError.AdditionalInfo); expectedServiceErrorAdditionalInfo != string(additionalInfo) { + t.Logf("Additional info was not unmarshaled properly.\n\tgot: %q\n\twant: %q", string(additionalInfo), expectedServiceErrorAdditionalInfo) + t.Fail() + } + + // the error body should still be there + defer r.Body.Close() + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Error(err) + } + if string(b) != j { + t.Fatalf("response body is wrong. got=%q expected=%q", string(b), j) + } + +} + +func TestRequestErrorString_WithError(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Conflict", + "target": "target1", + "details": [{"code": "conflict1", "message":"error message1"}], + "innererror": { "customKey": "customValue" }, + "additionalInfo": [{"type": "someErrorType", "info": {"someProperty": "someValue"}}] + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, _ := err.(*RequestError) + expected := "autorest/azure: Service returned an error. 
Status=500 Code=\"InternalError\" Message=\"Conflict\" Target=\"target1\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}] InnerError={\"customKey\":\"customValue\"} AdditionalInfo=[{\"info\":{\"someProperty\":\"someValue\"},\"type\":\"someErrorType\"}]" + if expected != azErr.Error() { + t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } +} + +func TestRequestErrorString_WithErrorNonConforming(t *testing.T) { + j := `{ + "error": { + "code": "InternalError", + "message": "Conflict", + "details": {"code": "conflict1", "message":"error message1"} + } + }` + uuid := "71FDB9F4-5E49-4C12-B266-DE7B4FD999A6" + r := mocks.NewResponseWithContent(j) + mocks.SetResponseHeader(r, HeaderRequestID, uuid) + r.Request = mocks.NewRequest() + r.StatusCode = http.StatusInternalServerError + r.Status = http.StatusText(r.StatusCode) + + err := autorest.Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByClosing()) + + if err == nil { + t.Fatalf("azure: returned nil error for proper error response") + } + azErr, _ := err.(*RequestError) + expected := "autorest/azure: Service returned an error. Status=500 Code=\"InternalError\" Message=\"Conflict\" Details=[{\"code\":\"conflict1\",\"message\":\"error message1\"}]" + if expected != azErr.Error() { + t.Fatalf("azure: send wrong RequestError.\nexpected=%v\ngot=%v", expected, azErr.Error()) + } +} + +func TestParseResourceID_WithValidBasicResourceID(t *testing.T) { + + basicResourceID := "/subscriptions/subid-3-3-4/resourceGroups/regGroupVladdb/providers/Microsoft.Network/LoadBalancer/testResourceName" + want := Resource{ + SubscriptionID: "subid-3-3-4", + ResourceGroup: "regGroupVladdb", + Provider: "Microsoft.Network", + ResourceType: "LoadBalancer", + ResourceName: "testResourceName", + } + got, err := ParseResourceID(basicResourceID) + + if err != nil { + t.Fatalf("azure: error returned while parsing valid resourceId") + } + + if got != want { + t.Logf("got: %+v\nwant: %+v", got, want) + t.Fail() + } +} + +func TestParseResourceID_WithValidSubResourceID(t *testing.T) { + subresourceID := "/subscriptions/subid-3-3-4/resourceGroups/regGroupVladdb/providers/Microsoft.Network/LoadBalancer/resource/is/a/subresource/actualresourceName" + want := Resource{ + SubscriptionID: "subid-3-3-4", + ResourceGroup: "regGroupVladdb", + Provider: "Microsoft.Network", + ResourceType: "LoadBalancer", + ResourceName: "actualresourceName", + } + got, err := ParseResourceID(subresourceID) + + if err != nil { + t.Fatalf("azure: error returned while parsing valid resourceId") + } + + if got != want { + t.Logf("got: %+v\nwant: %+v", got, want) + t.Fail() + } +} + +func TestParseResourceID_WithIncompleteResourceID(t *testing.T) { + basicResourceID := "/subscriptions/subid-3-3-4/resourceGroups/regGroupVladdb/providers/Microsoft.Network/" + want := Resource{} + + got, err := ParseResourceID(basicResourceID) + + if err == nil { + t.Fatalf("azure: no error returned on incomplete resource id") + } + + if got != want { + t.Logf("got: %+v\nwant: %+v", got, want) + t.Fail() + } +} + +func TestParseResourceID_WithMalformedResourceID(t *testing.T) { + malformedResourceID := "/providers/subid-3-3-4/resourceGroups/regGroupVladdb/subscriptions/Microsoft.Network/LoadBalancer/testResourceName" + want := Resource{} + + got, err := ParseResourceID(malformedResourceID) + + if err == nil { + t.Fatalf("azure: error returned while parsing malformed resourceID") + } + + if got != want { + t.Logf("got: %+v\nwant: %+v", got, want) + 
t.Fail() + } +} + +func withErrorPrepareDecorator(e *error) autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + *e = fmt.Errorf("azure: Faux Prepare Error") + return r, *e + }) + } +} + +func withAsyncResponseDecorator(n int) autorest.SendDecorator { + i := 0 + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + if i < n { + resp.StatusCode = http.StatusCreated + resp.Header = http.Header{} + resp.Header.Add(http.CanonicalHeaderKey(headerAsyncOperation), mocks.TestURL) + i++ + } else { + resp.StatusCode = http.StatusOK + resp.Header.Del(http.CanonicalHeaderKey(headerAsyncOperation)) + } + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return autorest.WithHeader(headerAuthorization, mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() autorest.PrepareDecorator { + return func(p autorest.Preparer) autorest.Preparer { + return autorest.PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() autorest.RespondDecorator { + return func(r autorest.Responder) autorest.Responder { + return autorest.ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go new file mode 100644 index 000000000000..b62bf03baceb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/profile.go @@ -0,0 +1,72 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + + "github.com/dimchansky/utfbom" + "github.com/mitchellh/go-homedir" +) + +// Profile represents a Profile from the Azure CLI +type Profile struct { + InstallationID string `json:"installationId"` + Subscriptions []Subscription `json:"subscriptions"` +} + +// Subscription represents a Subscription from the Azure CLI +type Subscription struct { + EnvironmentName string `json:"environmentName"` + ID string `json:"id"` + IsDefault bool `json:"isDefault"` + Name string `json:"name"` + State string `json:"state"` + TenantID string `json:"tenantId"` + User *User `json:"user"` +} + +// User represents a User from the Azure CLI +type User struct { + Name string `json:"name"` + Type string `json:"type"` +} + +// ProfilePath returns the path where the Azure Profile is stored from the Azure CLI +func ProfilePath() (string, error) { + return homedir.Expand("~/.azure/azureProfile.json") +} + +// LoadProfile restores a Profile object from a file located at 'path'. +func LoadProfile(path string) (result Profile, err error) { + var contents []byte + contents, err = ioutil.ReadFile(path) + if err != nil { + err = fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + return + } + reader := utfbom.SkipOnly(bytes.NewReader(contents)) + + dec := json.NewDecoder(reader) + if err = dec.Decode(&result); err != nil { + err = fmt.Errorf("failed to decode contents of file (%s) into a Profile representation: %v", path, err) + return + } + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go new file mode 100644 index 000000000000..83b81c34bf8d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/cli/token.go @@ -0,0 +1,114 @@ +package cli + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "encoding/json" + "fmt" + "os" + "strconv" + "time" + + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/date" + "github.com/mitchellh/go-homedir" +) + +// Token represents an AccessToken from the Azure CLI +type Token struct { + AccessToken string `json:"accessToken"` + Authority string `json:"_authority"` + ClientID string `json:"_clientId"` + ExpiresOn string `json:"expiresOn"` + IdentityProvider string `json:"identityProvider"` + IsMRRT bool `json:"isMRRT"` + RefreshToken string `json:"refreshToken"` + Resource string `json:"resource"` + TokenType string `json:"tokenType"` + UserID string `json:"userId"` +} + +// ToADALToken converts an Azure CLI `Token` to an `adal.Token` +func (t Token) ToADALToken() (converted adal.Token, err error) { + tokenExpirationDate, err := ParseExpirationDate(t.ExpiresOn) + if err != nil { + err = fmt.Errorf("Error parsing Token Expiration Date %q: %+v", t.ExpiresOn, err) + return + } + + difference := tokenExpirationDate.Sub(date.UnixEpoch()) + + converted = adal.Token{ + AccessToken: t.AccessToken, + Type: t.TokenType, + ExpiresIn: "3600", + ExpiresOn: strconv.Itoa(int(difference.Seconds())), + RefreshToken: t.RefreshToken, + Resource: t.Resource, + } + return +} + +// AccessTokensPath returns the path where access tokens are stored from the Azure CLI +// TODO(#199): add unit test. +func AccessTokensPath() (string, error) { + // The Azure CLI allows the user to customize the path of access tokens through an environment variable. + var accessTokenPath = os.Getenv("AZURE_ACCESS_TOKEN_FILE") + var err error + + // Fall back to the default path in a non-Cloud-Shell environment. + // TODO(#200): remove the dependency on the hard-coded path. + if accessTokenPath == "" { + accessTokenPath, err = homedir.Expand("~/.azure/accessTokens.json") + } + + return accessTokenPath, err +} + +// ParseExpirationDate parses either an Azure CLI or CloudShell date into a time object +func ParseExpirationDate(input string) (*time.Time, error) { + // CloudShell (and potentially the Azure CLI in future) + expirationDate, cloudShellErr := time.Parse(time.RFC3339, input) + if cloudShellErr != nil { + // Azure CLI (Python) e.g. 2017-08-31 19:48:57.998857 (plus the local timezone) + const cliFormat = "2006-01-02 15:04:05.999999" + expirationDate, cliErr := time.ParseInLocation(cliFormat, input, time.Local) + if cliErr == nil { + return &expirationDate, nil + } + + return nil, fmt.Errorf("Error parsing expiration date %q.\n\nCloudShell Error: \n%+v\n\nCLI Error:\n%+v", input, cloudShellErr, cliErr) + } + + return &expirationDate, nil +} + +// LoadTokens restores a set of Token objects from a file located at 'path'.
+func LoadTokens(path string) ([]Token, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open file (%s) while loading token: %v", path, err) + } + defer file.Close() + + var tokens []Token + + dec := json.NewDecoder(file) + if err = dec.Decode(&tokens); err != nil { + return nil, fmt.Errorf("failed to decode contents of file (%s) into a `cli.Token` representation: %v", path, err) + } + + return tokens, nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go new file mode 100644 index 000000000000..7e41f7fd99c9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments.go @@ -0,0 +1,191 @@ +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "strings" +) + +// EnvironmentFilepathName captures the name of the environment variable containing the path to the file +// to be used while populating the Azure Environment. +const EnvironmentFilepathName = "AZURE_ENVIRONMENT_FILEPATH" + +var environments = map[string]Environment{ + "AZURECHINACLOUD": ChinaCloud, + "AZUREGERMANCLOUD": GermanCloud, + "AZUREPUBLICCLOUD": PublicCloud, + "AZUREUSGOVERNMENTCLOUD": USGovernmentCloud, +} + +// Environment represents a set of endpoints for each of Azure's Clouds. 
+type Environment struct { + Name string `json:"name"` + ManagementPortalURL string `json:"managementPortalURL"` + PublishSettingsURL string `json:"publishSettingsURL"` + ServiceManagementEndpoint string `json:"serviceManagementEndpoint"` + ResourceManagerEndpoint string `json:"resourceManagerEndpoint"` + ActiveDirectoryEndpoint string `json:"activeDirectoryEndpoint"` + GalleryEndpoint string `json:"galleryEndpoint"` + KeyVaultEndpoint string `json:"keyVaultEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + ServiceBusEndpoint string `json:"serviceBusEndpoint"` + BatchManagementEndpoint string `json:"batchManagementEndpoint"` + StorageEndpointSuffix string `json:"storageEndpointSuffix"` + SQLDatabaseDNSSuffix string `json:"sqlDatabaseDNSSuffix"` + TrafficManagerDNSSuffix string `json:"trafficManagerDNSSuffix"` + KeyVaultDNSSuffix string `json:"keyVaultDNSSuffix"` + ServiceBusEndpointSuffix string `json:"serviceBusEndpointSuffix"` + ServiceManagementVMDNSSuffix string `json:"serviceManagementVMDNSSuffix"` + ResourceManagerVMDNSSuffix string `json:"resourceManagerVMDNSSuffix"` + ContainerRegistryDNSSuffix string `json:"containerRegistryDNSSuffix"` + TokenAudience string `json:"tokenAudience"` +} + +var ( + // PublicCloud is the default public Azure cloud environment + PublicCloud = Environment{ + Name: "AzurePublicCloud", + ManagementPortalURL: "https://manage.windowsazure.com/", + PublishSettingsURL: "https://manage.windowsazure.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.windows.net/", + ResourceManagerEndpoint: "https://management.azure.com/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.com/", + GalleryEndpoint: "https://gallery.azure.com/", + KeyVaultEndpoint: "https://vault.azure.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.windows.net/", + BatchManagementEndpoint: "https://batch.core.windows.net/", + StorageEndpointSuffix: "core.windows.net", + SQLDatabaseDNSSuffix: "database.windows.net", + TrafficManagerDNSSuffix: "trafficmanager.net", + KeyVaultDNSSuffix: "vault.azure.net", + ServiceBusEndpointSuffix: "servicebus.windows.net", + ServiceManagementVMDNSSuffix: "cloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.azure.com", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.azure.com/", + } + + // USGovernmentCloud is the cloud environment for the US Government + USGovernmentCloud = Environment{ + Name: "AzureUSGovernmentCloud", + ManagementPortalURL: "https://manage.windowsazure.us/", + PublishSettingsURL: "https://manage.windowsazure.us/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.usgovcloudapi.net/", + ResourceManagerEndpoint: "https://management.usgovcloudapi.net/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.us/", + GalleryEndpoint: "https://gallery.usgovcloudapi.net/", + KeyVaultEndpoint: "https://vault.usgovcloudapi.net/", + GraphEndpoint: "https://graph.windows.net/", + ServiceBusEndpoint: "https://servicebus.usgovcloudapi.net/", + BatchManagementEndpoint: "https://batch.core.usgovcloudapi.net/", + StorageEndpointSuffix: "core.usgovcloudapi.net", + SQLDatabaseDNSSuffix: "database.usgovcloudapi.net", + TrafficManagerDNSSuffix: "usgovtrafficmanager.net", + KeyVaultDNSSuffix: "vault.usgovcloudapi.net", + ServiceBusEndpointSuffix: "servicebus.usgovcloudapi.net", + ServiceManagementVMDNSSuffix: "usgovcloudapp.net", + ResourceManagerVMDNSSuffix: "cloudapp.windowsazure.us", + 
ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.usgovcloudapi.net/", + } + + // ChinaCloud is the cloud environment operated in China + ChinaCloud = Environment{ + Name: "AzureChinaCloud", + ManagementPortalURL: "https://manage.chinacloudapi.com/", + PublishSettingsURL: "https://manage.chinacloudapi.com/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.chinacloudapi.cn/", + ResourceManagerEndpoint: "https://management.chinacloudapi.cn/", + ActiveDirectoryEndpoint: "https://login.chinacloudapi.cn/", + GalleryEndpoint: "https://gallery.chinacloudapi.cn/", + KeyVaultEndpoint: "https://vault.azure.cn/", + GraphEndpoint: "https://graph.chinacloudapi.cn/", + ServiceBusEndpoint: "https://servicebus.chinacloudapi.cn/", + BatchManagementEndpoint: "https://batch.chinacloudapi.cn/", + StorageEndpointSuffix: "core.chinacloudapi.cn", + SQLDatabaseDNSSuffix: "database.chinacloudapi.cn", + TrafficManagerDNSSuffix: "trafficmanager.cn", + KeyVaultDNSSuffix: "vault.azure.cn", + ServiceBusEndpointSuffix: "servicebus.chinacloudapi.cn", + ServiceManagementVMDNSSuffix: "chinacloudapp.cn", + ResourceManagerVMDNSSuffix: "cloudapp.azure.cn", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.chinacloudapi.cn/", + } + + // GermanCloud is the cloud environment operated in Germany + GermanCloud = Environment{ + Name: "AzureGermanCloud", + ManagementPortalURL: "http://portal.microsoftazure.de/", + PublishSettingsURL: "https://manage.microsoftazure.de/publishsettings/index", + ServiceManagementEndpoint: "https://management.core.cloudapi.de/", + ResourceManagerEndpoint: "https://management.microsoftazure.de/", + ActiveDirectoryEndpoint: "https://login.microsoftonline.de/", + GalleryEndpoint: "https://gallery.cloudapi.de/", + KeyVaultEndpoint: "https://vault.microsoftazure.de/", + GraphEndpoint: "https://graph.cloudapi.de/", + ServiceBusEndpoint: "https://servicebus.cloudapi.de/", + BatchManagementEndpoint: "https://batch.cloudapi.de/", + StorageEndpointSuffix: "core.cloudapi.de", + SQLDatabaseDNSSuffix: "database.cloudapi.de", + TrafficManagerDNSSuffix: "azuretrafficmanager.de", + KeyVaultDNSSuffix: "vault.microsoftazure.de", + ServiceBusEndpointSuffix: "servicebus.cloudapi.de", + ServiceManagementVMDNSSuffix: "azurecloudapp.de", + ResourceManagerVMDNSSuffix: "cloudapp.microsoftazure.de", + ContainerRegistryDNSSuffix: "azurecr.io", + TokenAudience: "https://management.microsoftazure.de/", + } +) + +// EnvironmentFromName returns an Environment based on the common name specified. +func EnvironmentFromName(name string) (Environment, error) { + // IMPORTANT + // As per @radhikagupta5: + // This is technical debt, fundamentally here because Kubernetes is not currently accepting + // contributions to the providers. Once that is an option, the provider should be updated to + // directly call `EnvironmentFromFile`. Until then, we rely on dispatching Azure Stack environment creation + // from this method based on the name that is provided to us. + if strings.EqualFold(name, "AZURESTACKCLOUD") { + return EnvironmentFromFile(os.Getenv(EnvironmentFilepathName)) + } + + name = strings.ToUpper(name) + env, ok := environments[name] + if !ok { + return env, fmt.Errorf("autorest/azure: There is no cloud environment matching the name %q", name) + } + + return env, nil +} + +// EnvironmentFromFile loads an Environment from a configuration file available on disk. 
+// This function is particularly useful in the Hybrid Cloud model, where one must define their own +// endpoints. +func EnvironmentFromFile(location string) (unmarshaled Environment, err error) { + fileContents, err := ioutil.ReadFile(location) + if err != nil { + return + } + + err = json.Unmarshal(fileContents, &unmarshaled) + + return +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go new file mode 100644 index 000000000000..a75adb6f79bb --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/environments_test.go @@ -0,0 +1,348 @@ +// test +package azure + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "path" + "path/filepath" + "runtime" + "testing" +) + +// This correlates to the expected contents of ./testdata/test_environment_1.json +var testEnvironment1 = Environment{ + Name: "--unit-test--", + ManagementPortalURL: "--management-portal-url", + PublishSettingsURL: "--publish-settings-url--", + ServiceManagementEndpoint: "--service-management-endpoint--", + ResourceManagerEndpoint: "--resource-management-endpoint--", + ActiveDirectoryEndpoint: "--active-directory-endpoint--", + GalleryEndpoint: "--gallery-endpoint--", + KeyVaultEndpoint: "--key-vault--endpoint--", + GraphEndpoint: "--graph-endpoint--", + StorageEndpointSuffix: "--storage-endpoint-suffix--", + SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", + TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", + KeyVaultDNSSuffix: "--key-vault-dns-suffix--", + ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", + ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", + ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", + ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--", + TokenAudience: "--token-audience", +} + +func TestEnvironment_EnvironmentFromURL_NoOverride_Success(t *testing.T) { + fileContents, _ := ioutil.ReadFile(filepath.Join("testdata", "test_metadata_environment_1.json")) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(fileContents)) + })) + defer ts.Close() + + got, err := EnvironmentFromURL(ts.URL) + + if err != nil { + t.Error(err) + } + if got.Name != "HybridEnvironment" { + t.Logf("got: %v want: HybridEnvironment", got.Name) + t.Fail() + } +} + +func TestEnvironment_EnvironmentFromURL_OverrideStorageSuffix_Success(t *testing.T) { + fileContents, _ := ioutil.ReadFile(filepath.Join("testdata", "test_metadata_environment_1.json")) + ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + w.Write([]byte(fileContents)) + })) + defer ts.Close() + overrideProperty := OverrideProperty{ + Key: EnvironmentStorageEndpointSuffix, + Value: "fakeStorageSuffix", + } + got, err := 
EnvironmentFromURL(ts.URL, overrideProperty) + + if err != nil { + t.Error(err) + } + if got.StorageEndpointSuffix != "fakeStorageSuffix" { + t.Logf("got: %v want: fakeStorageSuffix", got.StorageEndpointSuffix) + t.Fail() + } +} + +func TestEnvironment_EnvironmentFromURL_EmptyEndpoint_Failure(t *testing.T) { + _, err := EnvironmentFromURL("") + + if err == nil { + t.Fail() + } + if err.Error() != "Metadata resource manager endpoint is empty" { + t.Fail() + } +} + +func TestEnvironment_EnvironmentFromFile(t *testing.T) { + got, err := EnvironmentFromFile(filepath.Join("testdata", "test_environment_1.json")) + if err != nil { + t.Error(err) + } + + if got != testEnvironment1 { + t.Logf("got: %v want: %v", got, testEnvironment1) + t.Fail() + } +} + +func TestEnvironment_EnvironmentFromName_Stack(t *testing.T) { + _, currentFile, _, _ := runtime.Caller(0) + prevEnvFilepathValue := os.Getenv(EnvironmentFilepathName) + os.Setenv(EnvironmentFilepathName, filepath.Join(path.Dir(currentFile), "testdata", "test_environment_1.json")) + defer os.Setenv(EnvironmentFilepathName, prevEnvFilepathValue) + + got, err := EnvironmentFromName("AZURESTACKCLOUD") + if err != nil { + t.Error(err) + } + + if got != testEnvironment1 { + t.Logf("got: %v want: %v", got, testEnvironment1) + t.Fail() + } +} + +func TestEnvironmentFromName(t *testing.T) { + name := "azurechinacloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "AzureChinaCloud" + if env, _ := EnvironmentFromName(name); env != ChinaCloud { + t.Errorf("Expected to get ChinaCloud for %q", name) + } + + name = "azuregermancloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "AzureGermanCloud" + if env, _ := EnvironmentFromName(name); env != GermanCloud { + t.Errorf("Expected to get GermanCloud for %q", name) + } + + name = "azurepubliccloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "AzurePublicCloud" + if env, _ := EnvironmentFromName(name); env != PublicCloud { + t.Errorf("Expected to get PublicCloud for %q", name) + } + + name = "azureusgovernmentcloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "AzureUSGovernmentCloud" + if env, _ := EnvironmentFromName(name); env != USGovernmentCloud { + t.Errorf("Expected to get USGovernmentCloud for %q", name) + } + + name = "thisisnotarealcloudenv" + if _, err := EnvironmentFromName(name); err == nil { + t.Errorf("Expected to get an error for %q", name) + } +} + +func TestDeserializeEnvironment(t *testing.T) { + env := `{ + "name": "--name--", + "ActiveDirectoryEndpoint": "--active-directory-endpoint--", + "galleryEndpoint": "--gallery-endpoint--", + "graphEndpoint": "--graph-endpoint--", + "serviceBusEndpoint": "--service-bus-endpoint--", + "keyVaultDNSSuffix": "--key-vault-dns-suffix--", + "keyVaultEndpoint": "--key-vault-endpoint--", + "managementPortalURL": "--management-portal-url--", + "publishSettingsURL": "--publish-settings-url--", + "resourceManagerEndpoint": "--resource-manager-endpoint--", + "serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--", + "serviceManagementEndpoint": "--service-management-endpoint--", + "sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--", + "storageEndpointSuffix": "--storage-endpoint-suffix--", + 
"trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--", + "serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--", + "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--", + "containerRegistryDNSSuffix": "--container-registry-dns-suffix--" + }` + + testSubject := Environment{} + err := json.Unmarshal([]byte(env), &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if "--name--" != testSubject.Name { + t.Errorf("Expected Name to be \"--name--\", but got %q", testSubject.Name) + } + if "--management-portal-url--" != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be \"--management-portal-url--\", but got %q", testSubject.ManagementPortalURL) + } + if "--publish-settings-url--" != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be \"--publish-settings-url--\", but got %q", testSubject.PublishSettingsURL) + } + if "--service-management-endpoint--" != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be \"--service-management-endpoint--\", but got %q", testSubject.ServiceManagementEndpoint) + } + if "--resource-manager-endpoint--" != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be \"--resource-manager-endpoint--\", but got %q", testSubject.ResourceManagerEndpoint) + } + if "--active-directory-endpoint--" != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be \"--active-directory-endpoint--\", but got %q", testSubject.ActiveDirectoryEndpoint) + } + if "--gallery-endpoint--" != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be \"--gallery-endpoint--\", but got %q", testSubject.GalleryEndpoint) + } + if "--key-vault-endpoint--" != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be \"--key-vault-endpoint--\", but got %q", testSubject.KeyVaultEndpoint) + } + if "--service-bus-endpoint--" != testSubject.ServiceBusEndpoint { + t.Errorf("Expected ServiceBusEndpoint to be \"--service-bus-endpoint--\", but goet %q", testSubject.ServiceBusEndpoint) + } + if "--graph-endpoint--" != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be \"--graph-endpoint--\", but got %q", testSubject.GraphEndpoint) + } + if "--storage-endpoint-suffix--" != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--storage-endpoint-suffix--\", but got %q", testSubject.StorageEndpointSuffix) + } + if "--sql-database-dns-suffix--" != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected sql-database-dns-suffix to be \"--sql-database-dns-suffix--\", but got %q", testSubject.SQLDatabaseDNSSuffix) + } + if "--key-vault-dns-suffix--" != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--key-vault-dns-suffix--\", but got %q", testSubject.KeyVaultDNSSuffix) + } + if "--service-bus-endpoint-suffix--" != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be \"--service-bus-endpoint-suffix--\", but got %q", testSubject.ServiceBusEndpointSuffix) + } + if "--asm-vm-dns-suffix--" != testSubject.ServiceManagementVMDNSSuffix { + t.Errorf("Expected ServiceManagementVMDNSSuffix to be \"--asm-vm-dns-suffix--\", but got %q", testSubject.ServiceManagementVMDNSSuffix) + } + if "--arm-vm-dns-suffix--" != testSubject.ResourceManagerVMDNSSuffix { + t.Errorf("Expected ResourceManagerVMDNSSuffix to be \"--arm-vm-dns-suffix--\", but got %q", testSubject.ResourceManagerVMDNSSuffix) + } 
+ if "--container-registry-dns-suffix--" != testSubject.ContainerRegistryDNSSuffix { + t.Errorf("Expected ContainerRegistryDNSSuffix to be \"--container-registry-dns-suffix--\", but got %q", testSubject.ContainerRegistryDNSSuffix) + } +} + +func TestRoundTripSerialization(t *testing.T) { + env := Environment{ + Name: "--unit-test--", + ManagementPortalURL: "--management-portal-url", + PublishSettingsURL: "--publish-settings-url--", + ServiceManagementEndpoint: "--service-management-endpoint--", + ResourceManagerEndpoint: "--resource-management-endpoint--", + ActiveDirectoryEndpoint: "--active-directory-endpoint--", + GalleryEndpoint: "--gallery-endpoint--", + KeyVaultEndpoint: "--key-vault--endpoint--", + GraphEndpoint: "--graph-endpoint--", + ServiceBusEndpoint: "--service-bus-endpoint--", + StorageEndpointSuffix: "--storage-endpoint-suffix--", + SQLDatabaseDNSSuffix: "--sql-database-dns-suffix--", + TrafficManagerDNSSuffix: "--traffic-manager-dns-suffix--", + KeyVaultDNSSuffix: "--key-vault-dns-suffix--", + ServiceBusEndpointSuffix: "--service-bus-endpoint-suffix--", + ServiceManagementVMDNSSuffix: "--asm-vm-dns-suffix--", + ResourceManagerVMDNSSuffix: "--arm-vm-dns-suffix--", + ContainerRegistryDNSSuffix: "--container-registry-dns-suffix--", + } + + bytes, err := json.Marshal(env) + if err != nil { + t.Fatalf("failed to marshal: %s", err) + } + + testSubject := Environment{} + err = json.Unmarshal(bytes, &testSubject) + if err != nil { + t.Fatalf("failed to unmarshal: %s", err) + } + + if env.Name != testSubject.Name { + t.Errorf("Expected Name to be %q, but got %q", env.Name, testSubject.Name) + } + if env.ManagementPortalURL != testSubject.ManagementPortalURL { + t.Errorf("Expected ManagementPortalURL to be %q, but got %q", env.ManagementPortalURL, testSubject.ManagementPortalURL) + } + if env.PublishSettingsURL != testSubject.PublishSettingsURL { + t.Errorf("Expected PublishSettingsURL to be %q, but got %q", env.PublishSettingsURL, testSubject.PublishSettingsURL) + } + if env.ServiceManagementEndpoint != testSubject.ServiceManagementEndpoint { + t.Errorf("Expected ServiceManagementEndpoint to be %q, but got %q", env.ServiceManagementEndpoint, testSubject.ServiceManagementEndpoint) + } + if env.ResourceManagerEndpoint != testSubject.ResourceManagerEndpoint { + t.Errorf("Expected ResourceManagerEndpoint to be %q, but got %q", env.ResourceManagerEndpoint, testSubject.ResourceManagerEndpoint) + } + if env.ActiveDirectoryEndpoint != testSubject.ActiveDirectoryEndpoint { + t.Errorf("Expected ActiveDirectoryEndpoint to be %q, but got %q", env.ActiveDirectoryEndpoint, testSubject.ActiveDirectoryEndpoint) + } + if env.GalleryEndpoint != testSubject.GalleryEndpoint { + t.Errorf("Expected GalleryEndpoint to be %q, but got %q", env.GalleryEndpoint, testSubject.GalleryEndpoint) + } + if env.ServiceBusEndpoint != testSubject.ServiceBusEndpoint { + t.Errorf("Expected ServiceBusEnpoint to be %q, but got %q", env.ServiceBusEndpoint, testSubject.ServiceBusEndpoint) + } + if env.KeyVaultEndpoint != testSubject.KeyVaultEndpoint { + t.Errorf("Expected KeyVaultEndpoint to be %q, but got %q", env.KeyVaultEndpoint, testSubject.KeyVaultEndpoint) + } + if env.GraphEndpoint != testSubject.GraphEndpoint { + t.Errorf("Expected GraphEndpoint to be %q, but got %q", env.GraphEndpoint, testSubject.GraphEndpoint) + } + if env.StorageEndpointSuffix != testSubject.StorageEndpointSuffix { + t.Errorf("Expected StorageEndpointSuffix to be %q, but got %q", env.StorageEndpointSuffix, testSubject.StorageEndpointSuffix) + } + 
if env.SQLDatabaseDNSSuffix != testSubject.SQLDatabaseDNSSuffix { + t.Errorf("Expected SQLDatabaseDNSSuffix to be %q, but got %q", env.SQLDatabaseDNSSuffix, testSubject.SQLDatabaseDNSSuffix) + } + if env.TrafficManagerDNSSuffix != testSubject.TrafficManagerDNSSuffix { + t.Errorf("Expected TrafficManagerDNSSuffix to be %q, but got %q", env.TrafficManagerDNSSuffix, testSubject.TrafficManagerDNSSuffix) + } + if env.KeyVaultDNSSuffix != testSubject.KeyVaultDNSSuffix { + t.Errorf("Expected KeyVaultDNSSuffix to be %q, but got %q", env.KeyVaultDNSSuffix, testSubject.KeyVaultDNSSuffix) + } + if env.ServiceBusEndpointSuffix != testSubject.ServiceBusEndpointSuffix { + t.Errorf("Expected ServiceBusEndpointSuffix to be %q, but got %q", env.ServiceBusEndpointSuffix, testSubject.ServiceBusEndpointSuffix) + } + if env.ServiceManagementVMDNSSuffix != testSubject.ServiceManagementVMDNSSuffix { + t.Errorf("Expected ServiceManagementVMDNSSuffix to be %q, but got %q", env.ServiceManagementVMDNSSuffix, testSubject.ServiceManagementVMDNSSuffix) + } + if env.ResourceManagerVMDNSSuffix != testSubject.ResourceManagerVMDNSSuffix { + t.Errorf("Expected ResourceManagerVMDNSSuffix to be %q, but got %q", env.ResourceManagerVMDNSSuffix, testSubject.ResourceManagerVMDNSSuffix) + } + if env.ContainerRegistryDNSSuffix != testSubject.ContainerRegistryDNSSuffix { + t.Errorf("Expected ContainerRegistryDNSSuffix to be %q, but got %q", env.ContainerRegistryDNSSuffix, testSubject.ContainerRegistryDNSSuffix) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/example/README.md b/vendor/github.com/Azure/go-autorest/autorest/azure/example/README.md new file mode 100644 index 000000000000..b87e173fa031 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/example/README.md @@ -0,0 +1,127 @@ +# autorest azure example + +## Usage (device mode) + +This shows how to use the example for device auth. + +1. Execute this. It will save your token to /tmp/azure-example-token: + + ``` + ./example -tenantId "13de0a15-b5db-44b9-b682-b4ba82afbd29" -subscriptionId "aff271ee-e9be-4441-b9bb-42f5af4cbaeb" -mode "device" -tokenCachePath "/tmp/azure-example-token" + ``` + +2. Execute it again, it will load the token from cache and not prompt for auth again. + +## Usage (certificate mode) + +This example covers how to make an authenticated call to the Azure Resource Manager APIs, using certificate-based authentication. + +0. Export some required variables + + ``` + export SUBSCRIPTION_ID="aff271ee-e9be-4441-b9bb-42f5af4cbaeb" + export TENANT_ID="13de0a15-b5db-44b9-b682-b4ba82afbd29" + export RESOURCE_GROUP="someresourcegroup" + ``` + + * replace both values with your own + +1. Create a private key + + ``` + openssl genrsa -out "example.key" 2048 + ``` + + + +2. Create the certificate + + ``` + openssl req -new -key "example.key" -subj "/CN=example" -out "example.csr" + + openssl x509 -req -in "example.csr" -signkey "example.key" -out "example.crt" -days 10000 + ``` + + + +3. Create the PKCS12 version of the certificate (with no password) + + ``` + openssl pkcs12 -export -out "example.pfx" -inkey "example.key" -in "example.crt" -passout pass: + ``` + + + +4. 
Register a new Azure AD Application with the certificate contents
+
+ ```
+ certificateContents="$(tail -n+2 "example.key" | head -n-1)"
+
+ azure ad app create \
+ --name "example-azuread-app" \
+ --home-page="http://example-azuread-app/home" \
+ --identifier-uris "http://example-azuread-app/app" \
+ --key-usage "Verify" \
+ --end-date "2020-01-01" \
+ --key-value "${certificateContents}"
+ ```
+
+
+
+5. Create a new service principal using the "Application Id" from the previous step
+
+ ```
+ azure ad sp create "APPLICATION_ID"
+ ```
+
+ * Replace APPLICATION_ID with the "Application Id" returned in step 4
+
+
+
+6. Grant your service principal the necessary permissions
+
+ ```
+ azure role assignment create \
+ --resource-group "${RESOURCE_GROUP}" \
+ --roleName "Contributor" \
+ --subscription "${SUBSCRIPTION_ID}" \
+ --spn "http://example-azuread-app/app"
+ ```
+
+ * Replace SUBSCRIPTION_ID with your subscription id
+ * Replace RESOURCE_GROUP with the resource group for the assignment
+ * Ensure that the `spn` parameter matches an `identifier-uri` from Step 4
+
+
+
+7. Run this example app to see your resource groups
+
+ ```
+ go run main.go \
+ --tenantId="${TENANT_ID}" \
+ --subscriptionId="${SUBSCRIPTION_ID}" \
+ --applicationId="http://example-azuread-app/app" \
+ --certificatePath="certificate.pfx"
+ ```
+
+
+You should see something like this as output:
+
+```
+2015/11/08 18:28:39 Using these settings:
+2015/11/08 18:28:39 * certificatePath: certificate.pfx
+2015/11/08 18:28:39 * applicationID: http://example-azuread-app/app
+2015/11/08 18:28:39 * tenantID: 13de0a15-b5db-44b9-b682-b4ba82afbd29
+2015/11/08 18:28:39 * subscriptionID: aff271ee-e9be-4441-b9bb-42f5af4cbaeb
+2015/11/08 18:28:39 loading certificate...
+2015/11/08 18:28:39 retrieve oauth token...
+2015/11/08 18:28:39 querying the list of resource groups...
+2015/11/08 18:28:50
+2015/11/08 18:28:50 Groups: {"value":[{"id":"/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/kube-66f30810","name":"kube-66f30810","location":"westus","tags":{},"properties":{"provisioningState":"Succeeded"}}]}
+```
+
+
+
+## Notes
+
+You may need to wait some time between executing steps 4, 5, and 6. If you issue those requests too quickly, you might hit an AD server that is not yet consistent with the server where the resource was created.
diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go b/vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go
new file mode 100644
index 000000000000..d1de683a6c60
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/azure/example/main.go
@@ -0,0 +1,272 @@
+package main
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
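The device-mode walkthrough above boils down to a handful of `adal`/`autorest` calls, which the example's `main.go` wraps in flag handling and token caching. The following is a minimal sketch of just that flow, assuming you supply your own tenant and client IDs (the values here are placeholders) and are content to collapse error handling into `log.Fatal`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/adal"
	"github.com/Azure/go-autorest/autorest/azure"
)

func main() {
	// Placeholder IDs; substitute your own tenant and a client ID that permits device auth.
	tenantID := "00000000-0000-0000-0000-000000000000"
	clientID := "00000000-0000-0000-0000-000000000000"
	resource := "https://management.core.windows.net/"

	oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID)
	if err != nil {
		log.Fatal(err)
	}

	// Kick off the device-code flow and show the user the sign-in instructions.
	oauthClient := &autorest.Client{}
	deviceCode, err := adal.InitiateDeviceAuth(oauthClient, *oauthConfig, clientID, resource)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(*deviceCode.Message)

	// Block until the user finishes signing in, then wrap the result in a service principal token.
	token, err := adal.WaitForUserCompletion(oauthClient, deviceCode)
	if err != nil {
		log.Fatal(err)
	}
	spt, err := adal.NewServicePrincipalTokenFromManualToken(*oauthConfig, clientID, resource, *token)
	if err != nil {
		log.Fatal(err)
	}

	// Any autorest.Client can now send authenticated requests using this token.
	armClient := &autorest.Client{Authorizer: autorest.NewBearerAuthorizer(spt)}
	_ = armClient
}
```

Certificate mode differs only in how the token is built: the full example decodes the PFX and calls `adal.NewServicePrincipalTokenFromCertificate` instead.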
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/json"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "strings"
+
+ "github.com/Azure/go-autorest/autorest"
+ "github.com/Azure/go-autorest/autorest/adal"
+ "github.com/Azure/go-autorest/autorest/azure"
+ "golang.org/x/crypto/pkcs12"
+)
+
+const (
+ resourceGroupURLTemplate = "https://management.azure.com"
+ apiVersion               = "2015-01-01"
+ nativeAppClientID        = "a87032a7-203c-4bf7-913c-44c50d23409a"
+ resource                 = "https://management.core.windows.net/"
+)
+
+var (
+ mode           string
+ tenantID       string
+ subscriptionID string
+ applicationID  string
+
+ tokenCachePath string
+ forceRefresh   bool
+ impatient      bool
+
+ certificatePath string
+)
+
+func init() {
+ flag.StringVar(&mode, "mode", "device", "mode of operation for SPT creation")
+ flag.StringVar(&certificatePath, "certificatePath", "", "path to pk12/pfx certificate")
+ flag.StringVar(&applicationID, "applicationId", "", "application id")
+ flag.StringVar(&tenantID, "tenantId", "", "tenant id")
+ flag.StringVar(&subscriptionID, "subscriptionId", "", "subscription id")
+ flag.StringVar(&tokenCachePath, "tokenCachePath", "", "location of oauth token cache")
+ flag.BoolVar(&forceRefresh, "forceRefresh", false, "pass true to force a token refresh")
+
+ flag.Parse()
+
+ log.Printf("mode(%s) certPath(%s) appID(%s) tenantID(%s), subID(%s)\n",
+ mode, certificatePath, applicationID, tenantID, subscriptionID)
+
+ if mode == "certificate" &&
+ (strings.TrimSpace(tenantID) == "" || strings.TrimSpace(subscriptionID) == "") {
+ log.Fatalln("Bad usage. Using certificate mode. Please specify tenantID, subscriptionID")
+ }
+
+ if mode != "certificate" && mode != "device" {
+ log.Fatalln("Bad usage. Mode must be one of 'certificate' or 'device'.")
+ }
+
+ if mode == "device" && strings.TrimSpace(applicationID) == "" {
+ log.Println("Using device mode auth. Will use `azkube` clientID since none was specified on the command line.")
+ applicationID = nativeAppClientID
+ }
+
+ if mode == "certificate" && strings.TrimSpace(certificatePath) == "" {
+ log.Fatalln("Bad usage. Mode 'certificate' requires the 'certificatePath' argument.")
+ }
+
+ if strings.TrimSpace(tenantID) == "" || strings.TrimSpace(subscriptionID) == "" || strings.TrimSpace(applicationID) == "" {
+ log.Fatalln("Bad usage. Must specify 'tenantId', 'subscriptionId', and 'applicationId'")
+ }
+}
+
+func getSptFromCachedToken(oauthConfig adal.OAuthConfig, clientID, resource string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) {
+ token, err := adal.LoadToken(tokenCachePath)
+ if err != nil {
+ return nil, fmt.Errorf("failed to load token from cache: %v", err)
+ }
+
+ spt, _ := adal.NewServicePrincipalTokenFromManualToken(
+ oauthConfig,
+ clientID,
+ resource,
+ *token,
+ callbacks...)
+ + return spt, nil +} + +func decodePkcs12(pkcs []byte, password string) (*x509.Certificate, *rsa.PrivateKey, error) { + privateKey, certificate, err := pkcs12.Decode(pkcs, password) + if err != nil { + return nil, nil, err + } + + rsaPrivateKey, isRsaKey := privateKey.(*rsa.PrivateKey) + if !isRsaKey { + return nil, nil, fmt.Errorf("PKCS#12 certificate must contain an RSA private key") + } + + return certificate, rsaPrivateKey, nil +} + +func getSptFromCertificate(oauthConfig adal.OAuthConfig, clientID, resource, certicatePath string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + certData, err := ioutil.ReadFile(certificatePath) + if err != nil { + return nil, fmt.Errorf("failed to read the certificate file (%s): %v", certificatePath, err) + } + + certificate, rsaPrivateKey, err := decodePkcs12(certData, "") + if err != nil { + return nil, fmt.Errorf("failed to decode pkcs12 certificate while creating spt: %v", err) + } + + spt, _ := adal.NewServicePrincipalTokenFromCertificate( + oauthConfig, + clientID, + certificate, + rsaPrivateKey, + resource, + callbacks...) + + return spt, nil +} + +func getSptFromDeviceFlow(oauthConfig adal.OAuthConfig, clientID, resource string, callbacks ...adal.TokenRefreshCallback) (*adal.ServicePrincipalToken, error) { + oauthClient := &autorest.Client{} + deviceCode, err := adal.InitiateDeviceAuth(oauthClient, oauthConfig, clientID, resource) + if err != nil { + return nil, fmt.Errorf("failed to start device auth flow: %s", err) + } + + fmt.Println(*deviceCode.Message) + + token, err := adal.WaitForUserCompletion(oauthClient, deviceCode) + if err != nil { + return nil, fmt.Errorf("failed to finish device auth flow: %s", err) + } + + spt, err := adal.NewServicePrincipalTokenFromManualToken( + oauthConfig, + clientID, + resource, + *token, + callbacks...) 
+ if err != nil { + return nil, fmt.Errorf("failed to get oauth token from device flow: %v", err) + } + + return spt, nil +} + +func printResourceGroups(client *autorest.Client) error { + p := map[string]interface{}{"subscription-id": subscriptionID} + q := map[string]interface{}{"api-version": apiVersion} + + req, _ := autorest.Prepare(&http.Request{}, + autorest.AsGet(), + autorest.WithBaseURL(resourceGroupURLTemplate), + autorest.WithPathParameters("/subscriptions/{subscription-id}/resourcegroups", p), + autorest.WithQueryParameters(q)) + + resp, err := autorest.SendWithSender(client, req) + if err != nil { + return err + } + + value := struct { + ResourceGroups []struct { + Name string `json:"name"` + } `json:"value"` + }{} + + defer resp.Body.Close() + dec := json.NewDecoder(resp.Body) + err = dec.Decode(&value) + if err != nil { + return err + } + + var groupNames = make([]string, len(value.ResourceGroups)) + for i, name := range value.ResourceGroups { + groupNames[i] = name.Name + } + + log.Println("Groups:", strings.Join(groupNames, ", ")) + return err +} + +func saveToken(spt adal.Token) { + if tokenCachePath != "" { + err := adal.SaveToken(tokenCachePath, 0600, spt) + if err != nil { + log.Println("error saving token", err) + } else { + log.Println("saved token to", tokenCachePath) + } + } +} + +func main() { + var spt *adal.ServicePrincipalToken + var err error + + callback := func(t adal.Token) error { + log.Println("refresh callback was called") + saveToken(t) + return nil + } + + oauthConfig, err := adal.NewOAuthConfig(azure.PublicCloud.ActiveDirectoryEndpoint, tenantID) + if err != nil { + panic(err) + } + + if tokenCachePath != "" { + log.Println("tokenCachePath specified; attempting to load from", tokenCachePath) + spt, err = getSptFromCachedToken(*oauthConfig, applicationID, resource, callback) + if err != nil { + spt = nil // just in case, this is the condition below + log.Println("loading from cache failed:", err) + } + } + + if spt == nil { + log.Println("authenticating via 'mode'", mode) + switch mode { + case "device": + spt, err = getSptFromDeviceFlow(*oauthConfig, applicationID, resource, callback) + case "certificate": + spt, err = getSptFromCertificate(*oauthConfig, applicationID, resource, certificatePath, callback) + } + if err != nil { + log.Fatalln("failed to retrieve token:", err) + } + + // should save it as soon as you get it since Refresh won't be called for some time + if tokenCachePath != "" { + saveToken(spt.Token()) + } + } + + client := &autorest.Client{} + client.Authorizer = autorest.NewBearerAuthorizer(spt) + + printResourceGroups(client) + + if forceRefresh { + err = spt.Refresh() + if err != nil { + panic(err) + } + printResourceGroups(client) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go new file mode 100644 index 000000000000..507f9e95cf11 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/metadata_environment.go @@ -0,0 +1,245 @@ +package azure + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + + "github.com/Azure/go-autorest/autorest" +) + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +type audience []string + +type authentication struct { + LoginEndpoint string `json:"loginEndpoint"` + Audiences audience `json:"audiences"` +} + +type environmentMetadataInfo struct { + GalleryEndpoint string `json:"galleryEndpoint"` + GraphEndpoint string `json:"graphEndpoint"` + PortalEndpoint string `json:"portalEndpoint"` + Authentication authentication `json:"authentication"` +} + +// EnvironmentProperty represent property names that clients can override +type EnvironmentProperty string + +const ( + // EnvironmentName ... + EnvironmentName EnvironmentProperty = "name" + // EnvironmentManagementPortalURL .. + EnvironmentManagementPortalURL EnvironmentProperty = "managementPortalURL" + // EnvironmentPublishSettingsURL ... + EnvironmentPublishSettingsURL EnvironmentProperty = "publishSettingsURL" + // EnvironmentServiceManagementEndpoint ... + EnvironmentServiceManagementEndpoint EnvironmentProperty = "serviceManagementEndpoint" + // EnvironmentResourceManagerEndpoint ... + EnvironmentResourceManagerEndpoint EnvironmentProperty = "resourceManagerEndpoint" + // EnvironmentActiveDirectoryEndpoint ... + EnvironmentActiveDirectoryEndpoint EnvironmentProperty = "activeDirectoryEndpoint" + // EnvironmentGalleryEndpoint ... + EnvironmentGalleryEndpoint EnvironmentProperty = "galleryEndpoint" + // EnvironmentKeyVaultEndpoint ... + EnvironmentKeyVaultEndpoint EnvironmentProperty = "keyVaultEndpoint" + // EnvironmentGraphEndpoint ... + EnvironmentGraphEndpoint EnvironmentProperty = "graphEndpoint" + // EnvironmentServiceBusEndpoint ... + EnvironmentServiceBusEndpoint EnvironmentProperty = "serviceBusEndpoint" + // EnvironmentBatchManagementEndpoint ... + EnvironmentBatchManagementEndpoint EnvironmentProperty = "batchManagementEndpoint" + // EnvironmentStorageEndpointSuffix ... + EnvironmentStorageEndpointSuffix EnvironmentProperty = "storageEndpointSuffix" + // EnvironmentSQLDatabaseDNSSuffix ... + EnvironmentSQLDatabaseDNSSuffix EnvironmentProperty = "sqlDatabaseDNSSuffix" + // EnvironmentTrafficManagerDNSSuffix ... + EnvironmentTrafficManagerDNSSuffix EnvironmentProperty = "trafficManagerDNSSuffix" + // EnvironmentKeyVaultDNSSuffix ... + EnvironmentKeyVaultDNSSuffix EnvironmentProperty = "keyVaultDNSSuffix" + // EnvironmentServiceBusEndpointSuffix ... + EnvironmentServiceBusEndpointSuffix EnvironmentProperty = "serviceBusEndpointSuffix" + // EnvironmentServiceManagementVMDNSSuffix ... + EnvironmentServiceManagementVMDNSSuffix EnvironmentProperty = "serviceManagementVMDNSSuffix" + // EnvironmentResourceManagerVMDNSSuffix ... + EnvironmentResourceManagerVMDNSSuffix EnvironmentProperty = "resourceManagerVMDNSSuffix" + // EnvironmentContainerRegistryDNSSuffix ... + EnvironmentContainerRegistryDNSSuffix EnvironmentProperty = "containerRegistryDNSSuffix" + // EnvironmentTokenAudience ... 
+ EnvironmentTokenAudience EnvironmentProperty = "tokenAudience" +) + +// OverrideProperty represents property name and value that clients can override +type OverrideProperty struct { + Key EnvironmentProperty + Value string +} + +// EnvironmentFromURL loads an Environment from a URL +// This function is particularly useful in the Hybrid Cloud model, where one may define their own +// endpoints. +func EnvironmentFromURL(resourceManagerEndpoint string, properties ...OverrideProperty) (environment Environment, err error) { + var metadataEnvProperties environmentMetadataInfo + + if resourceManagerEndpoint == "" { + return environment, fmt.Errorf("Metadata resource manager endpoint is empty") + } + + if metadataEnvProperties, err = retrieveMetadataEnvironment(resourceManagerEndpoint); err != nil { + return environment, err + } + + // Give priority to user's override values + overrideProperties(&environment, properties) + + if environment.Name == "" { + environment.Name = "HybridEnvironment" + } + stampDNSSuffix := environment.StorageEndpointSuffix + if stampDNSSuffix == "" { + stampDNSSuffix = strings.TrimSuffix(strings.TrimPrefix(strings.Replace(resourceManagerEndpoint, strings.Split(resourceManagerEndpoint, ".")[0], "", 1), "."), "/") + environment.StorageEndpointSuffix = stampDNSSuffix + } + if environment.KeyVaultDNSSuffix == "" { + environment.KeyVaultDNSSuffix = fmt.Sprintf("%s.%s", "vault", stampDNSSuffix) + } + if environment.KeyVaultEndpoint == "" { + environment.KeyVaultEndpoint = fmt.Sprintf("%s%s", "https://", environment.KeyVaultDNSSuffix) + } + if environment.TokenAudience == "" { + environment.TokenAudience = metadataEnvProperties.Authentication.Audiences[0] + } + if environment.ActiveDirectoryEndpoint == "" { + environment.ActiveDirectoryEndpoint = metadataEnvProperties.Authentication.LoginEndpoint + } + if environment.ResourceManagerEndpoint == "" { + environment.ResourceManagerEndpoint = resourceManagerEndpoint + } + if environment.GalleryEndpoint == "" { + environment.GalleryEndpoint = metadataEnvProperties.GalleryEndpoint + } + if environment.GraphEndpoint == "" { + environment.GraphEndpoint = metadataEnvProperties.GraphEndpoint + } + + return environment, nil +} + +func overrideProperties(environment *Environment, properties []OverrideProperty) { + for _, property := range properties { + switch property.Key { + case EnvironmentName: + { + environment.Name = property.Value + } + case EnvironmentManagementPortalURL: + { + environment.ManagementPortalURL = property.Value + } + case EnvironmentPublishSettingsURL: + { + environment.PublishSettingsURL = property.Value + } + case EnvironmentServiceManagementEndpoint: + { + environment.ServiceManagementEndpoint = property.Value + } + case EnvironmentResourceManagerEndpoint: + { + environment.ResourceManagerEndpoint = property.Value + } + case EnvironmentActiveDirectoryEndpoint: + { + environment.ActiveDirectoryEndpoint = property.Value + } + case EnvironmentGalleryEndpoint: + { + environment.GalleryEndpoint = property.Value + } + case EnvironmentKeyVaultEndpoint: + { + environment.KeyVaultEndpoint = property.Value + } + case EnvironmentGraphEndpoint: + { + environment.GraphEndpoint = property.Value + } + case EnvironmentServiceBusEndpoint: + { + environment.ServiceBusEndpoint = property.Value + } + case EnvironmentBatchManagementEndpoint: + { + environment.BatchManagementEndpoint = property.Value + } + case EnvironmentStorageEndpointSuffix: + { + environment.StorageEndpointSuffix = property.Value + } + case 
EnvironmentSQLDatabaseDNSSuffix: + { + environment.SQLDatabaseDNSSuffix = property.Value + } + case EnvironmentTrafficManagerDNSSuffix: + { + environment.TrafficManagerDNSSuffix = property.Value + } + case EnvironmentKeyVaultDNSSuffix: + { + environment.KeyVaultDNSSuffix = property.Value + } + case EnvironmentServiceBusEndpointSuffix: + { + environment.ServiceBusEndpointSuffix = property.Value + } + case EnvironmentServiceManagementVMDNSSuffix: + { + environment.ServiceManagementVMDNSSuffix = property.Value + } + case EnvironmentResourceManagerVMDNSSuffix: + { + environment.ResourceManagerVMDNSSuffix = property.Value + } + case EnvironmentContainerRegistryDNSSuffix: + { + environment.ContainerRegistryDNSSuffix = property.Value + } + case EnvironmentTokenAudience: + { + environment.TokenAudience = property.Value + } + } + } +} + +func retrieveMetadataEnvironment(endpoint string) (environment environmentMetadataInfo, err error) { + client := autorest.NewClientWithUserAgent("") + managementEndpoint := fmt.Sprintf("%s%s", strings.TrimSuffix(endpoint, "/"), "/metadata/endpoints?api-version=1.0") + req, _ := http.NewRequest("GET", managementEndpoint, nil) + response, err := client.Do(req) + if err != nil { + return environment, err + } + defer response.Body.Close() + jsonResponse, err := ioutil.ReadAll(response.Body) + if err != nil { + return environment, err + } + err = json.Unmarshal(jsonResponse, &environment) + return environment, err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go new file mode 100644 index 000000000000..bd34f0ed5a58 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp.go @@ -0,0 +1,200 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Azure/go-autorest/autorest" +) + +// DoRetryWithRegistration tries to register the resource provider in case it is unregistered. 
+// It also handles request retries +func DoRetryWithRegistration(client autorest.Client) autorest.SendDecorator { + return func(s autorest.Sender) autorest.Sender { + return autorest.SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := autorest.NewRetriableRequest(r) + for currentAttempt := 0; currentAttempt < client.RetryAttempts; currentAttempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + + resp, err = autorest.SendWithSender(s, rr.Request(), + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return resp, err + } + + if resp.StatusCode != http.StatusConflict || client.SkipResourceProviderRegistration { + return resp, err + } + var re RequestError + err = autorest.Respond( + resp, + autorest.ByUnmarshallingJSON(&re), + ) + if err != nil { + return resp, err + } + err = re + + if re.ServiceError != nil && re.ServiceError.Code == "MissingSubscriptionRegistration" { + regErr := register(client, r, re) + if regErr != nil { + return resp, fmt.Errorf("failed auto registering Resource Provider: %s. Original error: %s", regErr, err) + } + } + } + return resp, err + }) + } +} + +func getProvider(re RequestError) (string, error) { + if re.ServiceError != nil && len(re.ServiceError.Details) > 0 { + return re.ServiceError.Details[0]["target"].(string), nil + } + return "", errors.New("provider was not found in the response") +} + +func register(client autorest.Client, originalReq *http.Request, re RequestError) error { + subID := getSubscription(originalReq.URL.Path) + if subID == "" { + return errors.New("missing parameter subscriptionID to register resource provider") + } + providerName, err := getProvider(re) + if err != nil { + return fmt.Errorf("missing parameter provider to register resource provider: %s", err) + } + newURL := url.URL{ + Scheme: originalReq.URL.Scheme, + Host: originalReq.URL.Host, + } + + // taken from the resources SDK + // with almost identical code, this sections are easier to mantain + // It is also not a good idea to import the SDK here + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L252 + pathParameters := map[string]interface{}{ + "resourceProviderNamespace": autorest.Encode("path", providerName), + "subscriptionId": autorest.Encode("path", subID), + } + + const APIVersion = "2016-09-01" + queryParameters := map[string]interface{}{ + "api-version": APIVersion, + } + + preparer := autorest.CreatePreparer( + autorest.AsPost(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}/register", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + + req, err := preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + type Provider struct { + RegistrationState *string `json:"registrationState,omitempty"` + } + var provider Provider + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + // poll for registered provisioning state + now := time.Now() + for err == nil && 
time.Since(now) < client.PollingDuration { + // taken from the resources SDK + // https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45 + preparer := autorest.CreatePreparer( + autorest.AsGet(), + autorest.WithBaseURL(newURL.String()), + autorest.WithPathParameters("/subscriptions/{subscriptionId}/providers/{resourceProviderNamespace}", pathParameters), + autorest.WithQueryParameters(queryParameters), + ) + req, err = preparer.Prepare(&http.Request{}) + if err != nil { + return err + } + req = req.WithContext(originalReq.Context()) + + resp, err := autorest.SendWithSender(client, req, + autorest.DoRetryForStatusCodes(client.RetryAttempts, client.RetryDuration, autorest.StatusCodesForRetry...), + ) + if err != nil { + return err + } + + err = autorest.Respond( + resp, + WithErrorUnlessStatusCode(http.StatusOK), + autorest.ByUnmarshallingJSON(&provider), + autorest.ByClosing(), + ) + if err != nil { + return err + } + + if provider.RegistrationState != nil && + *provider.RegistrationState == "Registered" { + break + } + + delayed := autorest.DelayWithRetryAfter(resp, originalReq.Context().Done()) + if !delayed && !autorest.DelayForBackoff(client.PollingDelay, 0, originalReq.Context().Done()) { + return originalReq.Context().Err() + } + } + if !(time.Since(now) < client.PollingDuration) { + return errors.New("polling for resource provider registration has exceeded the polling duration") + } + return err +} + +func getSubscription(path string) string { + parts := strings.Split(path, "/") + for i, v := range parts { + if v == "subscriptions" && (i+1) < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go b/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go new file mode 100644 index 000000000000..f01355c0c5e1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/rp_test.go @@ -0,0 +1,169 @@ +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package azure + +import ( + "context" + "net/http" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/mocks" +) + +func TestDoRetryWithRegistration(t *testing.T) { + client := mocks.NewSender() + // first response, should retry because it is a transient error + client.AppendResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError)) + // response indicates the resource provider has not been registered + client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{ + "error":{ + "code":"MissingSubscriptionRegistration", + "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. 
See https://aka.ms/rps-not-found for how to register subscriptions.",
+ "details":[
+ {
+ "code":"MissingSubscriptionRegistration",
+ "target":"Microsoft.EventGrid",
+ "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions."
+ }
+ ]
+ }
+}
+`), http.StatusConflict, "MissingSubscriptionRegistration"))
+ // first poll response, still not ready
+ client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
+ "registrationState": "Registering"
+}
+`), http.StatusOK, "200 OK"))
+ // last poll response, resource provider has been registered
+ client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
+ "registrationState": "Registered"
+}
+`), http.StatusOK, "200 OK"))
+ // retry original request, response is successful
+ client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK))
+
+ req := mocks.NewRequestForURL("https://lol/subscriptions/rofl")
+ req.Body = mocks.NewBody("lolol")
+ r, err := autorest.SendWithSender(client, req,
+ DoRetryWithRegistration(autorest.Client{
+ PollingDelay:    time.Second,
+ PollingDuration: time.Second * 10,
+ RetryAttempts:   5,
+ RetryDuration:   time.Second,
+ Sender:          client,
+ }),
+ )
+ if err != nil {
+ t.Fatalf("got error: %v", err)
+ }
+
+ autorest.Respond(r,
+ autorest.ByDiscardingBody(),
+ autorest.ByClosing(),
+ )
+
+ if r.StatusCode != http.StatusOK {
+ t.Fatalf("azure: Sender#DoRetryWithRegistration -- Got: StatusCode %v; Want: StatusCode 200 OK", r.StatusCode)
+ }
+}
+
+func TestDoRetrySkipRegistration(t *testing.T) {
+ client := mocks.NewSender()
+ // first response, should retry because it is a transient error
+ client.AppendResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError))
+ // response indicates the resource provider has not been registered
+ client.AppendResponse(mocks.NewResponseWithBodyAndStatus(mocks.NewBody(`{
+ "error":{
+ "code":"MissingSubscriptionRegistration",
+ "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions.",
+ "details":[
+ {
+ "code":"MissingSubscriptionRegistration",
+ "target":"Microsoft.EventGrid",
+ "message":"The subscription registration is in 'Unregistered' state. The subscription must be registered to use namespace 'Microsoft.EventGrid'. See https://aka.ms/rps-not-found for how to register subscriptions."
+ } + ] + } +}`), http.StatusConflict, "MissingSubscriptionRegistration")) + + req := mocks.NewRequestForURL("https://lol/subscriptions/rofl") + req.Body = mocks.NewBody("lolol") + r, err := autorest.SendWithSender(client, req, + DoRetryWithRegistration(autorest.Client{ + PollingDelay: time.Second, + PollingDuration: time.Second * 10, + RetryAttempts: 5, + RetryDuration: time.Second, + Sender: client, + SkipResourceProviderRegistration: true, + }), + ) + if err != nil { + t.Fatalf("got error: %v", err) + } + + autorest.Respond(r, + autorest.ByDiscardingBody(), + autorest.ByClosing(), + ) + + if r.StatusCode != http.StatusConflict { + t.Fatalf("azure: Sender#DoRetryWithRegistration -- Got: StatusCode %v; Want: StatusCode 409 Conflict", r.StatusCode) + } +} + +func TestDoRetryWithRegistration_CanBeCancelled(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + delay := 5 * time.Second + + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("Internal server error", http.StatusInternalServerError), 5) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + end := time.Now() + var err error + + go func() { + req := mocks.NewRequestForURL("https://lol/subscriptions/rofl") + req = req.WithContext(ctx) + req.Body = mocks.NewBody("lolol") + _, err = autorest.SendWithSender(client, req, + DoRetryWithRegistration(autorest.Client{ + PollingDelay: time.Second, + PollingDuration: delay, + RetryAttempts: 5, + RetryDuration: time.Second, + Sender: client, + SkipResourceProviderRegistration: true, + }), + ) + end = time.Now() + wg.Done() + }() + cancel() + wg.Wait() + time.Sleep(5 * time.Millisecond) + if err == nil { + t.Fatalf("azure: DoRetryWithRegistration didn't cancel") + } + if end.Sub(start) >= delay { + t.Fatalf("azure: DoRetryWithRegistration failed to cancel") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/testdata/test_environment_1.json b/vendor/github.com/Azure/go-autorest/autorest/azure/testdata/test_environment_1.json new file mode 100644 index 000000000000..d1d136d751a9 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/azure/testdata/test_environment_1.json @@ -0,0 +1,20 @@ +{ + "name": "--unit-test--", + "managementPortalURL": "--management-portal-url", + "publishSettingsURL": "--publish-settings-url--", + "serviceManagementEndpoint": "--service-management-endpoint--", + "resourceManagerEndpoint": "--resource-management-endpoint--", + "activeDirectoryEndpoint": "--active-directory-endpoint--", + "galleryEndpoint": "--gallery-endpoint--", + "keyVaultEndpoint": "--key-vault--endpoint--", + "graphEndpoint": "--graph-endpoint--", + "storageEndpointSuffix": "--storage-endpoint-suffix--", + "sqlDatabaseDNSSuffix": "--sql-database-dns-suffix--", + "trafficManagerDNSSuffix": "--traffic-manager-dns-suffix--", + "keyVaultDNSSuffix": "--key-vault-dns-suffix--", + "serviceBusEndpointSuffix": "--service-bus-endpoint-suffix--", + "serviceManagementVMDNSSuffix": "--asm-vm-dns-suffix--", + "resourceManagerVMDNSSuffix": "--arm-vm-dns-suffix--", + "containerRegistryDNSSuffix": "--container-registry-dns-suffix--", + "tokenAudience": "--token-audience" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/azure/testdata/test_metadata_environment_1.json b/vendor/github.com/Azure/go-autorest/autorest/azure/testdata/test_metadata_environment_1.json new file mode 100644 index 000000000000..0dc812ab22d7 --- /dev/null +++ 
b/vendor/github.com/Azure/go-autorest/autorest/azure/testdata/test_metadata_environment_1.json @@ -0,0 +1,11 @@ +{ + "galleryEndpoint":"https://portal.local.azurestack.external:30015/", + "graphEndpoint":"https://graph.windows.net/", + "portalEndpoint":"https://portal.local.azurestack.external/", + "authentication":{ + "loginEndpoint":"https://login.windows.net/", + "audiences":[ + "https://management.azurestackci04.onmicrosoft.com/3ee899c8-c137-4ce4-b230-619961a09a73" + ] + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client.go b/vendor/github.com/Azure/go-autorest/autorest/client.go new file mode 100644 index 000000000000..5c558c83a7e5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client.go @@ -0,0 +1,267 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "net/http/cookiejar" + "strings" + "time" + + "github.com/Azure/go-autorest/logger" + "github.com/Azure/go-autorest/version" +) + +const ( + // DefaultPollingDelay is a reasonable delay between polling requests. + DefaultPollingDelay = 60 * time.Second + + // DefaultPollingDuration is a reasonable total polling duration. + DefaultPollingDuration = 15 * time.Minute + + // DefaultRetryAttempts is number of attempts for retry status codes (5xx). + DefaultRetryAttempts = 3 + + // DefaultRetryDuration is the duration to wait between retries. + DefaultRetryDuration = 30 * time.Second +) + +var ( + // StatusCodesForRetry are a defined group of status code for which the client will retry + StatusCodesForRetry = []int{ + http.StatusRequestTimeout, // 408 + http.StatusTooManyRequests, // 429 + http.StatusInternalServerError, // 500 + http.StatusBadGateway, // 502 + http.StatusServiceUnavailable, // 503 + http.StatusGatewayTimeout, // 504 + } +) + +const ( + requestFormat = `HTTP Request Begin =================================================== +%s +===================================================== HTTP Request End +` + responseFormat = `HTTP Response Begin =================================================== +%s +===================================================== HTTP Response End +` +) + +// Response serves as the base for all responses from generated clients. It provides access to the +// last http.Response. +type Response struct { + *http.Response `json:"-"` +} + +// LoggingInspector implements request and response inspectors that log the full request and +// response to a supplied log. +type LoggingInspector struct { + Logger *log.Logger +} + +// WithInspection returns a PrepareDecorator that emits the http.Request to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. 
+func (li LoggingInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + var body, b bytes.Buffer + + defer r.Body.Close() + + r.Body = ioutil.NopCloser(io.TeeReader(r.Body, &body)) + if err := r.Write(&b); err != nil { + return nil, fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(requestFormat, b.String()) + + r.Body = ioutil.NopCloser(&body) + return p.Prepare(r) + }) + } +} + +// ByInspecting returns a RespondDecorator that emits the http.Response to the supplied logger. The +// body is restored after being emitted. +// +// Note: Since it reads the entire Body, this decorator should not be used where body streaming is +// important. It is best used to trace JSON or similar body values. +func (li LoggingInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + var body, b bytes.Buffer + defer resp.Body.Close() + resp.Body = ioutil.NopCloser(io.TeeReader(resp.Body, &body)) + if err := resp.Write(&b); err != nil { + return fmt.Errorf("Failed to write response: %v", err) + } + + li.Logger.Printf(responseFormat, b.String()) + + resp.Body = ioutil.NopCloser(&body) + return r.Respond(resp) + }) + } +} + +// Client is the base for autorest generated clients. It provides default, "do nothing" +// implementations of an Authorizer, RequestInspector, and ResponseInspector. It also returns the +// standard, undecorated http.Client as a default Sender. +// +// Generated clients should also use Error (see NewError and NewErrorWithError) for errors and +// return responses that compose with Response. +// +// Most customization of generated clients is best achieved by supplying a custom Authorizer, custom +// RequestInspector, and / or custom ResponseInspector. Users may log requests, implement circuit +// breakers (see https://msdn.microsoft.com/en-us/library/dn589784.aspx) or otherwise influence +// sending the request by providing a decorated Sender. +type Client struct { + Authorizer Authorizer + Sender Sender + RequestInspector PrepareDecorator + ResponseInspector RespondDecorator + + // PollingDelay sets the polling frequency used in absence of a Retry-After HTTP header + PollingDelay time.Duration + + // PollingDuration sets the maximum polling time after which an error is returned. + PollingDuration time.Duration + + // RetryAttempts sets the default number of retry attempts for client. + RetryAttempts int + + // RetryDuration sets the delay duration for retries. + RetryDuration time.Duration + + // UserAgent, if not empty, will be set as the HTTP User-Agent header on all requests sent + // through the Do method. + UserAgent string + + Jar http.CookieJar + + // Set to true to skip attempted registration of resource providers (false by default). + SkipResourceProviderRegistration bool +} + +// NewClientWithUserAgent returns an instance of a Client with the UserAgent set to the passed +// string. 
+func NewClientWithUserAgent(ua string) Client { + c := Client{ + PollingDelay: DefaultPollingDelay, + PollingDuration: DefaultPollingDuration, + RetryAttempts: DefaultRetryAttempts, + RetryDuration: DefaultRetryDuration, + UserAgent: version.UserAgent(), + } + c.Sender = c.sender() + c.AddToUserAgent(ua) + return c +} + +// AddToUserAgent adds an extension to the current user agent +func (c *Client) AddToUserAgent(extension string) error { + if extension != "" { + c.UserAgent = fmt.Sprintf("%s %s", c.UserAgent, extension) + return nil + } + return fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent) +} + +// Do implements the Sender interface by invoking the active Sender after applying authorization. +// If Sender is not set, it uses a new instance of http.Client. In both cases it will, if UserAgent +// is set, apply set the User-Agent header. +func (c Client) Do(r *http.Request) (*http.Response, error) { + if r.UserAgent() == "" { + r, _ = Prepare(r, + WithUserAgent(c.UserAgent)) + } + // NOTE: c.WithInspection() must be last in the list so that it can inspect all preceding operations + r, err := Prepare(r, + c.WithAuthorization(), + c.WithInspection()) + if err != nil { + var resp *http.Response + if detErr, ok := err.(DetailedError); ok { + // if the authorization failed (e.g. invalid credentials) there will + // be a response associated with the error, be sure to return it. + resp = detErr.Response + } + return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed") + } + logger.Instance.WriteRequest(r, logger.Filter{ + Header: func(k string, v []string) (bool, []string) { + // remove the auth token from the log + if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") { + v = []string{"**REDACTED**"} + } + return true, v + }, + }) + resp, err := SendWithSender(c.sender(), r) + logger.Instance.WriteResponse(resp, logger.Filter{}) + Respond(resp, c.ByInspecting()) + return resp, err +} + +// sender returns the Sender to which to send requests. +func (c Client) sender() Sender { + if c.Sender == nil { + j, _ := cookiejar.New(nil) + return &http.Client{Jar: j} + } + return c.Sender +} + +// WithAuthorization is a convenience method that returns the WithAuthorization PrepareDecorator +// from the current Authorizer. If not Authorizer is set, it uses the NullAuthorizer. +func (c Client) WithAuthorization() PrepareDecorator { + return c.authorizer().WithAuthorization() +} + +// authorizer returns the Authorizer to use. +func (c Client) authorizer() Authorizer { + if c.Authorizer == nil { + return NullAuthorizer{} + } + return c.Authorizer +} + +// WithInspection is a convenience method that passes the request to the supplied RequestInspector, +// if present, or returns the WithNothing PrepareDecorator otherwise. +func (c Client) WithInspection() PrepareDecorator { + if c.RequestInspector == nil { + return WithNothing() + } + return c.RequestInspector +} + +// ByInspecting is a convenience method that passes the response to the supplied ResponseInspector, +// if present, or returns the ByIgnoring RespondDecorator otherwise. 
+func (c Client) ByInspecting() RespondDecorator { + if c.ResponseInspector == nil { + return ByIgnoring() + } + return c.ResponseInspector +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/client_test.go b/vendor/github.com/Azure/go-autorest/autorest/client_test.go new file mode 100644 index 000000000000..0008600c2343 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/client_test.go @@ -0,0 +1,403 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io/ioutil" + "log" + "math/rand" + "net/http" + "net/http/httptest" + "reflect" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" + "github.com/Azure/go-autorest/version" +) + +func TestLoggingInspectorWithInspection(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(mocks.NewRequestWithContent("Content"), + c.WithInspection()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + if _, err := Prepare(r, + c.WithInspection()); err != nil { + t.Error(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not record Request to the log") + } +} + +func TestLoggingInspectorWithInspectionRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewRequestWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.RequestInspector = li.WithInspection() + + Prepare(r, + c.WithInspection()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#WithInspection did not restore the Request body") + } +} + +func TestLoggingInspectorByInspecting(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(mocks.NewResponseWithContent("Content"), + c.ByInspecting()) + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingEmitsErrors(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + if err := Respond(r, + c.ByInspecting()); err != nil { + t.Fatal(err) + } + + if len(b.String()) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspection did not record Response to the log") + } +} + +func TestLoggingInspectorByInspectingRestoresBody(t *testing.T) { + b := bytes.Buffer{} + c := Client{} + r := 
mocks.NewResponseWithContent("Content") + li := LoggingInspector{Logger: log.New(&b, "", 0)} + c.ResponseInspector = li.ByInspecting() + + Respond(r, + c.ByInspecting()) + + s, _ := ioutil.ReadAll(r.Body) + if len(s) <= 0 { + t.Fatal("autorest: LoggingInspector#ByInspecting did not restore the Response body") + } +} + +func TestNewClientWithUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + completeUA := fmt.Sprintf("%s %s", version.UserAgent(), ua) + + if c.UserAgent != completeUA { + t.Fatalf("autorest: NewClientWithUserAgent failed to set the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } +} + +func TestAddToUserAgent(t *testing.T) { + ua := "UserAgent" + c := NewClientWithUserAgent(ua) + ext := "extension" + err := c.AddToUserAgent(ext) + if err != nil { + t.Fatalf("autorest: AddToUserAgent returned error -- expected nil, received %s", err) + } + completeUA := fmt.Sprintf("%s %s %s", version.UserAgent(), ua, ext) + + if c.UserAgent != completeUA { + t.Fatalf("autorest: AddToUserAgent failed to add an extension to the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } + + err = c.AddToUserAgent("") + if err == nil { + t.Fatalf("autorest: AddToUserAgent didn't return error -- expected %s, received nil", + fmt.Errorf("Extension was empty, User Agent stayed as %s", c.UserAgent)) + } + if c.UserAgent != completeUA { + t.Fatalf("autorest: AddToUserAgent failed to not add an empty extension to the UserAgent -- expected %s, received %s", + completeUA, c.UserAgent) + } +} + +func TestClientSenderReturnsHttpClientByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.sender()) != "*http.Client" { + t.Fatal("autorest: Client#sender failed to return http.Client by default") + } +} + +func TestClientSenderReturnsSetSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + if c.sender() != s { + t.Fatal("autorest: Client#sender failed to return set Sender") + } +} + +func TestClientDoInvokesSender(t *testing.T) { + c := Client{} + + s := mocks.NewSender() + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() != 1 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientDoSetsUserAgent(t *testing.T) { + ua := "UserAgent" + c := Client{UserAgent: ua} + r := mocks.NewRequest() + s := mocks.NewSender() + c.Sender = s + + c.Do(r) + + if r.UserAgent() != ua { + t.Fatalf("autorest: Client#Do failed to correctly set User-Agent header: %s=%s", + http.CanonicalHeaderKey(headerUserAgent), r.UserAgent()) + } +} + +func TestClientDoSetsAuthorization(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + c := Client{Authorizer: mockAuthorizer{}, Sender: s} + + c.Do(r) + if len(r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) <= 0 { + t.Fatalf("autorest: Client#Send failed to set Authorization header -- %s=%s", + http.CanonicalHeaderKey(headerAuthorization), + r.Header.Get(http.CanonicalHeaderKey(headerAuthorization))) + } +} + +func TestClientDoInvokesRequestInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{RequestInspector: i.WithInspection(), Sender: s} + + c.Do(r) + if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the RequestInspector") + } +} + +func TestClientDoInvokesResponseInspector(t *testing.T) { + r := mocks.NewRequest() + s := mocks.NewSender() + i := &mockInspector{} + c := Client{ResponseInspector: i.ByInspecting(), Sender: s} + + c.Do(r) + 
if !i.wasInvoked { + t.Fatal("autorest: Client#Send failed to invoke the ResponseInspector") + } +} + +func TestClientDoReturnsErrorIfPrepareFails(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + _, err := c.Do(&http.Request{}) + if err == nil { + t.Fatalf("autorest: Client#Do failed to return an error when Prepare failed") + } +} + +func TestClientDoDoesNotSendIfPrepareFails(t *testing.T) { + c := Client{} + s := mocks.NewSender() + c.Authorizer = mockFailingAuthorizer{} + c.Sender = s + + c.Do(&http.Request{}) + if s.Attempts() > 0 { + t.Fatal("autorest: Client#Do failed to invoke the Sender") + } +} + +func TestClientAuthorizerReturnsNullAuthorizerByDefault(t *testing.T) { + c := Client{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.NullAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the NullAuthorizer by default") + } +} + +func TestClientAuthorizerReturnsSetAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + if fmt.Sprintf("%T", c.authorizer()) != "autorest.mockAuthorizer" { + t.Fatal("autorest: Client#authorizer failed to return the set Authorizer") + } +} + +func TestClientWithAuthorizer(t *testing.T) { + c := Client{} + c.Authorizer = mockAuthorizer{} + + req, _ := Prepare(&http.Request{}, + c.WithAuthorization()) + + if req.Header.Get(headerAuthorization) == "" { + t.Fatal("autorest: Client#WithAuthorizer failed to return the WithAuthorizer from the active Authorizer") + } +} + +func TestClientWithInspection(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.RequestInspector = r.WithInspection() + + Prepare(&http.Request{}, + c.WithInspection()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#WithInspection failed to invoke RequestInspector") + } +} + +func TestClientWithInspectionSetsDefault(t *testing.T) { + c := Client{} + + r1 := &http.Request{} + r2, _ := Prepare(r1, + c.WithInspection()) + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("autorest: Client#WithInspection failed to provide a default RequestInspector") + } +} + +func TestClientByInspecting(t *testing.T) { + c := Client{} + r := &mockInspector{} + c.ResponseInspector = r.ByInspecting() + + Respond(&http.Response{}, + c.ByInspecting()) + + if !r.wasInvoked { + t.Fatal("autorest: Client#ByInspecting failed to invoke ResponseInspector") + } +} + +func TestClientByInspectingSetsDefault(t *testing.T) { + c := Client{} + + r := &http.Response{} + Respond(r, + c.ByInspecting()) + + if !reflect.DeepEqual(r, &http.Response{}) { + t.Fatal("autorest: Client#ByInspecting failed to provide a default ResponseInspector") + } +} + +func TestCookies(t *testing.T) { + second := "second" + expected := http.Cookie{ + Name: "tastes", + Value: "delicious", + } + + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.SetCookie(w, &expected) + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: ioutil.ReadAll failed reading request body: %s", err) + } + if string(b) == second { + cookie, err := r.Cookie(expected.Name) + if err != nil { + t.Fatalf("autorest: r.Cookie could not get request cookie: %s", err) + } + if cookie == nil { + t.Fatalf("autorest: got nil cookie, expecting %v", expected) + } + if cookie.Value != expected.Value { + t.Fatalf("autorest: got cookie value '%s', expecting '%s'", cookie.Value, expected.Name) + } + } + })) + defer server.Close() + + client := NewClientWithUserAgent("") + _, err := SendWithSender(client, 
mocks.NewRequestForURL(server.URL)) + if err != nil { + t.Fatalf("autorest: first request failed: %s", err) + } + + r2, err := http.NewRequest(http.MethodGet, server.URL, mocks.NewBody(second)) + if err != nil { + t.Fatalf("autorest: failed creating second request: %s", err) + } + + _, err = SendWithSender(client, r2) + if err != nil { + t.Fatalf("autorest: second request failed: %s", err) + } +} + +func randomString(n int) string { + const chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789" + r := rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + s := make([]byte, n) + for i := range s { + s[i] = chars[r.Intn(len(chars))] + } + return string(s) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date.go b/vendor/github.com/Azure/go-autorest/autorest/date/date.go new file mode 100644 index 000000000000..c4571065685a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date.go @@ -0,0 +1,96 @@ +/* +Package date provides time.Time derivatives that conform to the Swagger.io (https://swagger.io/) +defined date formats: Date and DateTime. Both types may, in most cases, be used in lieu of +time.Time types. And both convert to time.Time through a ToTime method. +*/ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "time" +) + +const ( + fullDate = "2006-01-02" + fullDateJSON = `"2006-01-02"` + dateFormat = "%04d-%02d-%02d" + jsonFormat = `"%04d-%02d-%02d"` +) + +// Date defines a type similar to time.Time but assumes a layout of RFC3339 full-date (i.e., +// 2006-01-02). +type Date struct { + time.Time +} + +// ParseDate create a new Date from the passed string. +func ParseDate(date string) (d Date, err error) { + return parseDate(date, fullDate) +} + +func parseDate(date string, format string) (Date, error) { + d, err := time.Parse(format, date) + return Date{Time: d}, err +} + +// MarshalBinary preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalBinary() ([]byte, error) { + return d.MarshalText() +} + +// UnmarshalBinary reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalBinary(data []byte) error { + return d.UnmarshalText(data) +} + +// MarshalJSON preserves the Date as a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d Date) MarshalJSON() (json []byte, err error) { + return []byte(fmt.Sprintf(jsonFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalJSON reconstitutes the Date from a JSON string conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalJSON(data []byte) (err error) { + d.Time, err = time.Parse(fullDateJSON, string(data)) + return err +} + +// MarshalText preserves the Date as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). 
+func (d Date) MarshalText() (text []byte, err error) { + return []byte(fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day())), nil +} + +// UnmarshalText reconstitutes a Date saved as a byte array conforming to RFC3339 full-date (i.e., +// 2006-01-02). +func (d *Date) UnmarshalText(data []byte) (err error) { + d.Time, err = time.Parse(fullDate, string(data)) + return err +} + +// String returns the Date formatted as an RFC3339 full-date string (i.e., 2006-01-02). +func (d Date) String() string { + return fmt.Sprintf(dateFormat, d.Year(), d.Month(), d.Day()) +} + +// ToTime returns a Date as a time.Time +func (d Date) ToTime() time.Time { + return d.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go new file mode 100644 index 000000000000..4a40bbc3e1e4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/date_test.go @@ -0,0 +1,237 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + + t, err := time.Parse(time.RFC3339, "2001-02-04T00:00:00Z") + if err != nil { + fmt.Println(err) + } + + // Date acts as time.Time when the receiver + if d.Before(t) { + fmt.Printf("Before ") + } else { + fmt.Printf("After ") + } + + // Convert Date when needing a time.Time + if t.After(d.ToTime()) { + fmt.Printf("After") + } else { + fmt.Printf("Before") + } + // Output: Before After +} + +func ExampleDate_MarshalBinary() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalBinary() { + d := Date{} + t := "2001-02-03" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalJSON() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "2001-02-03" +} + +func ExampleDate_UnmarshalJSON() { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "2001-02-03"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Date) + // Output: 2001-02-03 +} + +func ExampleDate_MarshalText() { + d, err := ParseDate("2001-02-03") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03 +} + +func ExampleDate_UnmarshalText() { + d := Date{} + t := "2001-02-03" + + if err := 
d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03 +} + +func TestDateString(t *testing.T) { + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: String failed (%v)", err) + } + if d.String() != "2001-02-03" { + t.Fatalf("date: String failed (%v)", d.String()) + } +} + +func TestDateBinaryRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: MarshalBinary failed (%v)", err) + } + + d2 := Date{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestDateJSONRoundTrip(t *testing.T) { + type s struct { + Date Date `json:"date"` + } + var err error + d1 := s{} + d1.Date, err = ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestDateTextRoundTrip(t *testing.T) { + d1, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: MarshalText failed (%v)", err) + } + d2 := Date{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestDateToTime(t *testing.T) { + var d Date + d, err := ParseDate("2001-02-03") + if err != nil { + t.Fatalf("date: ParseDate failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestDateUnmarshalJSONReturnsError(t *testing.T) { + var d struct { + Date Date `json:"date"` + } + j := `{"date" : "February 3, 2001"}` + + if err := json.Unmarshal([]byte(j), &d); err == nil { + t.Fatal("date: Date failed to return error for malformed JSON date") + } +} + +func TestDateUnmarshalTextReturnsError(t *testing.T) { + d := Date{} + txt := "February 3, 2001" + + if err := d.UnmarshalText([]byte(txt)); err == nil { + t.Fatal("date: Date failed to return error for malformed Text date") + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time.go b/vendor/github.com/Azure/go-autorest/autorest/date/time.go new file mode 100644 index 000000000000..b453fad0491e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time.go @@ -0,0 +1,103 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
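For illustration, a minimal standalone sketch of the Date type defined above used as a JSON struct field (this snippet is not part of the vendored files; it assumes the vendored package's canonical import path and mirrors the round-trip tests included above):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

// record models a JSON document with an RFC3339 full-date field.
type record struct {
	Issued date.Date `json:"issued"`
}

func main() {
	// Unmarshal goes through Date.UnmarshalJSON, which expects the "2006-01-02" layout.
	var r record
	if err := json.Unmarshal([]byte(`{"issued":"2001-02-03"}`), &r); err != nil {
		fmt.Println(err)
		return
	}

	// Marshal goes back through Date.MarshalJSON and reproduces the same quoted form.
	b, err := json.Marshal(r)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(string(b)) // {"issued":"2001-02-03"}
}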
+ +import ( + "regexp" + "time" +) + +// Azure reports time in UTC but it doesn't include the 'Z' time zone suffix in some cases. +const ( + azureUtcFormatJSON = `"2006-01-02T15:04:05.999999999"` + azureUtcFormat = "2006-01-02T15:04:05.999999999" + rfc3339JSON = `"` + time.RFC3339Nano + `"` + rfc3339 = time.RFC3339Nano + tzOffsetRegex = `(Z|z|\+|-)(\d+:\d+)*"*$` +) + +// Time defines a type similar to time.Time but assumes a layout of RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +type Time struct { + time.Time +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalBinary() ([]byte, error) { + return t.Time.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalJSON() (json []byte, err error) { + return t.Time.MarshalJSON() +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalJSON(data []byte) (err error) { + timeFormat := azureUtcFormatJSON + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339JSON + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// MarshalText preserves the Time as a byte array conforming to RFC3339 date-time (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) MarshalText() (text []byte, err error) { + return t.Time.MarshalText() +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC3339 date-time +// (i.e., 2006-01-02T15:04:05Z). +func (t *Time) UnmarshalText(data []byte) (err error) { + timeFormat := azureUtcFormat + match, err := regexp.Match(tzOffsetRegex, data) + if err != nil { + return err + } else if match { + timeFormat = rfc3339 + } + t.Time, err = ParseTime(timeFormat, string(data)) + return err +} + +// String returns the Time formatted as an RFC3339 date-time string (i.e., +// 2006-01-02T15:04:05Z). +func (t Time) String() string { + // Note: time.Time.String does not return an RFC3339 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} + +// ToTime returns a Time as a time.Time +func (t Time) ToTime() time.Time { + return t.Time +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go new file mode 100644 index 000000000000..d8c811707abd --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/time_test.go @@ -0,0 +1,277 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
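As the comments in time.go above note, Azure sometimes reports UTC times without a trailing 'Z'; UnmarshalJSON and UnmarshalText match the incoming value against tzOffsetRegex and fall back to the zone-less layout when no offset is present. A minimal illustrative sketch of that behaviour follows (not part of the vendored files; it assumes the vendored import path, and the tests below exercise the same cases):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/Azure/go-autorest/autorest/date"
)

type payload struct {
	Stamp date.Time `json:"datetime"`
}

func main() {
	// No zone suffix: the azureUtcFormat fallback is used and the value is read as UTC.
	var a payload
	if err := json.Unmarshal([]byte(`{"datetime":"2001-02-03T04:05:06.789"}`), &a); err != nil {
		fmt.Println(err)
		return
	}

	// Explicit offset: the RFC3339 layout is selected instead.
	var b payload
	if err := json.Unmarshal([]byte(`{"datetime":"2001-02-03T04:05:06+01:00"}`), &b); err != nil {
		fmt.Println(err)
		return
	}

	// Both parse successfully; String() delegates to MarshalText (RFC3339).
	fmt.Println(a.Stamp, b.Stamp)
}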
+ +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleParseTime() { + d, _ := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + fmt.Println(d) + // Output: 2001-02-03 04:05:06 +0000 UTC +} + +func ExampleTime_MarshalBinary() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + t, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalBinary() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalJSON() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "2001-02-03T04:05:06Z" +} + +func ExampleTime_UnmarshalJSON() { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06Z"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_MarshalText() { + d, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: 2001-02-03T04:05:06Z +} + +func ExampleTime_UnmarshalText() { + d := Time{} + t := "2001-02-03T04:05:06Z" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2001-02-03T04:05:06Z +} + +func TestUnmarshalTextforInvalidDate(t *testing.T) { + d := Time{} + dt := "2001-02-03T04:05:06AAA" + + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestUnmarshalJSONforInvalidDate(t *testing.T) { + d := Time{} + dt := `"2001-02-03T04:05:06AAA"` + + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: Time#Unmarshal was expecting error for invalid date") + } +} + +func TestTimeString(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := Time{ti} + if d.String() != "2001-02-03T04:05:06Z" { + t.Fatalf("date: Time#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForError(t *testing.T) { + d := Time{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: Time#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{ti} + t1, err := d1.MarshalBinary() + if err != nil { + t.Fatalf("date: Time#MarshalBinary failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: Time#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date:Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTrip(t *testing.T) { + type s struct { + Time Time `json:"datetime"` + } + + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + + d1 := s{Time: Time{ti}} + j, err := json.Marshal(d1) + if err != nil { + 
t.Fatalf("date: Time#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: Time#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTrip(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + d1 := Time{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: Time#MarshalText failed (%v)", err) + } + + d2 := Time{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTime(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + d := Time{ti} + if err != nil { + t.Fatalf("date: Time#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} + +func TestUnmarshalJSONNoOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "2001-02-03T04:05:06.789"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalJSONPosOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "1980-01-02T00:11:35.01+01:00"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalJSONNegOffset(t *testing.T) { + var d struct { + Time Time `json:"datetime"` + } + j := `{"datetime" : "1492-10-12T10:15:01.789-08:00"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + t.Fatalf("date: Time#Unmarshal failed (%v)", err) + } +} + +func TestUnmarshalTextNoOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} + +func TestUnmarshalTextPosOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06+00:30" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} + +func TestUnmarshalTextNegOffset(t *testing.T) { + d := Time{} + t1 := "2001-02-03T04:05:06-11:00" + + if err := d.UnmarshalText([]byte(t1)); err != nil { + t.Fatalf("date: Time#UnmarshalText failed (%v)", err) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go new file mode 100644 index 000000000000..48fb39ba9b9a --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123.go @@ -0,0 +1,100 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "errors" + "time" +) + +const ( + rfc1123JSON = `"` + time.RFC1123 + `"` + rfc1123 = time.RFC1123 +) + +// TimeRFC1123 defines a type similar to time.Time but assumes a layout of RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +type TimeRFC1123 struct { + time.Time +} + +// UnmarshalJSON reconstitutes the Time from a JSON string conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalJSON(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123JSON, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalJSON preserves the Time as a JSON string conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalJSON() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalJSON: year outside of range [0,9999]") + } + b := []byte(t.Format(rfc1123JSON)) + return b, nil +} + +// MarshalText preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalText() ([]byte, error) { + if y := t.Year(); y < 0 || y >= 10000 { + return nil, errors.New("Time.MarshalText: year outside of range [0,9999]") + } + + b := []byte(t.Format(rfc1123)) + return b, nil +} + +// UnmarshalText reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalText(data []byte) (err error) { + t.Time, err = ParseTime(rfc1123, string(data)) + if err != nil { + return err + } + return nil +} + +// MarshalBinary preserves the Time as a byte array conforming to RFC1123 date-time (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) MarshalBinary() ([]byte, error) { + return t.MarshalText() +} + +// UnmarshalBinary reconstitutes a Time saved as a byte array conforming to RFC1123 date-time +// (i.e., Mon, 02 Jan 2006 15:04:05 MST). +func (t *TimeRFC1123) UnmarshalBinary(data []byte) error { + return t.UnmarshalText(data) +} + +// ToTime returns a Time as a time.Time +func (t TimeRFC1123) ToTime() time.Time { + return t.Time +} + +// String returns the Time formatted as an RFC1123 date-time string (i.e., +// Mon, 02 Jan 2006 15:04:05 MST). +func (t TimeRFC1123) String() string { + // Note: time.Time.String does not return an RFC1123 compliant string, time.Time.MarshalText does. + b, err := t.MarshalText() + if err != nil { + return "" + } + return string(b) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go new file mode 100644 index 000000000000..dfd640513db8 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/timerfc1123_test.go @@ -0,0 +1,226 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
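The RFC1123 layout cannot represent years outside [0, 9999], which is why MarshalJSON and MarshalText above reject such values rather than emit an unparseable string. A small illustrative sketch (not part of the vendored files, assuming the vendored import path):

package main

import (
	"encoding/json"
	"fmt"
	"time"

	"github.com/Azure/go-autorest/autorest/date"
)

func main() {
	// A representable year marshals to the quoted RFC1123 form.
	ok := date.TimeRFC1123{Time: time.Date(2006, time.January, 2, 15, 4, 5, 0, time.UTC)}
	if b, err := json.Marshal(ok); err == nil {
		fmt.Println(string(b)) // "Mon, 02 Jan 2006 15:04:05 UTC"
	}

	// A year beyond 9999 is rejected instead of producing a bogus timestamp.
	bad := date.TimeRFC1123{Time: time.Date(10000, time.January, 1, 0, 0, 0, 0, time.UTC)}
	if _, err := json.Marshal(bad); err != nil {
		fmt.Println("marshal rejected:", err)
	}
}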
+ +import ( + "encoding/json" + "fmt" + "reflect" + "testing" + "time" +) + +func ExampleTimeRFC1123() { + d, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: 2006-01-02 15:04:05 +0000 MST +} + +func ExampleTimeRFC1123_MarshalBinary() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + b, err := d.MarshalBinary() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(b)) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_UnmarshalBinary() { + d := TimeRFC1123{} + t := "Mon, 02 Jan 2006 15:04:05 MST" + if err := d.UnmarshalBinary([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalJSON() { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + j, err := json.Marshal(d) + if err != nil { + fmt.Println(err) + } + fmt.Println(string(j)) + // Output: "Mon, 02 Jan 2006 15:04:05 MST" +} + +func TestTimeRFC1123MarshalJSONInvalid(t *testing.T) { + ti := time.Date(20000, 01, 01, 00, 00, 00, 00, time.UTC) + d := TimeRFC1123{ti} + if _, err := json.Marshal(d); err == nil { + t.Fatalf("date: TimeRFC1123#Marshal failed for invalid date") + } +} + +func ExampleTimeRFC1123_UnmarshalJSON() { + var d struct { + Time TimeRFC1123 `json:"datetime"` + } + j := `{"datetime" : "Mon, 02 Jan 2006 15:04:05 MST"}` + + if err := json.Unmarshal([]byte(j), &d); err != nil { + fmt.Println(err) + } + fmt.Println(d.Time) + // Output: Mon, 02 Jan 2006 15:04:05 MST +} + +func ExampleTimeRFC1123_MarshalText() { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + t, err := d.MarshalText() + if err != nil { + fmt.Println(err) + } + fmt.Println(string(t)) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func ExampleTimeRFC1123_UnmarshalText() { + d := TimeRFC1123{} + t := "Sat, 03 Feb 2001 04:05:06 UTC" + + if err := d.UnmarshalText([]byte(t)); err != nil { + fmt.Println(err) + } + fmt.Println(d) + // Output: Sat, 03 Feb 2001 04:05:06 UTC +} + +func TestUnmarshalJSONforInvalidDateRfc1123(t *testing.T) { + dt := `"Mon, 02 Jan 2000000 15:05 MST"` + d := TimeRFC1123{} + if err := d.UnmarshalJSON([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestUnmarshalTextforInvalidDateRfc1123(t *testing.T) { + dt := "Mon, 02 Jan 2000000 15:05 MST" + d := TimeRFC1123{} + if err := d.UnmarshalText([]byte(dt)); err == nil { + t.Fatalf("date: TimeRFC1123#Unmarshal failed for invalid date") + } +} + +func TestTimeStringRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + fmt.Println(err) + } + d := TimeRFC1123{ti} + if d.String() != "Mon, 02 Jan 2006 15:04:05 MST" { + t.Fatalf("date: TimeRFC1123#String failed (%v)", d.String()) + } +} + +func TestTimeStringReturnsEmptyStringForErrorRfc1123(t *testing.T) { + d := TimeRFC1123{Time: time.Date(20000, 01, 01, 01, 01, 01, 01, time.UTC)} + if d.String() != "" { + t.Fatalf("date: TimeRFC1123#String failed empty string for an error") + } +} + +func TestTimeBinaryRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc3339, "2001-02-03T04:05:06Z") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{ti} + t1, err := 
d1.MarshalBinary() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalBinary failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalBinary(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalBinary failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Binary failed (%v, %v)", d1, d2) + } +} + +func TestTimeJSONRoundTripRfc1123(t *testing.T) { + type s struct { + Time TimeRFC1123 `json:"datetime"` + } + var err error + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := s{Time: TimeRFC1123{ti}} + j, err := json.Marshal(d1) + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalJSON failed (%v)", err) + } + + d2 := s{} + if err = json.Unmarshal(j, &d2); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalJSON failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip JSON failed (%v, %v)", d1, d2) + } +} + +func TestTimeTextRoundTripRfc1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + d1 := TimeRFC1123{Time: ti} + t1, err := d1.MarshalText() + if err != nil { + t.Fatalf("date: TimeRFC1123#MarshalText failed (%v)", err) + } + + d2 := TimeRFC1123{} + if err = d2.UnmarshalText(t1); err != nil { + t.Fatalf("date: TimeRFC1123#UnmarshalText failed (%v)", err) + } + + if !reflect.DeepEqual(d1, d2) { + t.Fatalf("date: Round-trip Text failed (%v, %v)", d1, d2) + } +} + +func TestTimeToTimeRFC1123(t *testing.T) { + ti, err := ParseTime(rfc1123, "Mon, 02 Jan 2006 15:04:05 MST") + d := TimeRFC1123{ti} + if err != nil { + t.Fatalf("date: TimeRFC1123#ParseTime failed (%v)", err) + } + var _ time.Time = d.ToTime() +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go new file mode 100644 index 000000000000..7073959b2a9c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime.go @@ -0,0 +1,123 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "time" +) + +// unixEpoch is the moment in time that should be treated as timestamp 0. +var unixEpoch = time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + +// UnixTime marshals and unmarshals a time that is represented as the number +// of seconds (ignoring skip-seconds) since the Unix Epoch. +type UnixTime time.Time + +// Duration returns the time as a Duration since the UnixEpoch. +func (t UnixTime) Duration() time.Duration { + return time.Time(t).Sub(unixEpoch) +} + +// NewUnixTimeFromSeconds creates a UnixTime as a number of seconds from the UnixEpoch. 
+func NewUnixTimeFromSeconds(seconds float64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(seconds * float64(time.Second))) +} + +// NewUnixTimeFromNanoseconds creates a UnixTime as a number of nanoseconds from the UnixEpoch. +func NewUnixTimeFromNanoseconds(nanoseconds int64) UnixTime { + return NewUnixTimeFromDuration(time.Duration(nanoseconds)) +} + +// NewUnixTimeFromDuration creates a UnixTime as a duration of time since the UnixEpoch. +func NewUnixTimeFromDuration(dur time.Duration) UnixTime { + return UnixTime(unixEpoch.Add(dur)) +} + +// UnixEpoch retreives the moment considered the Unix Epoch. I.e. The time represented by '0' +func UnixEpoch() time.Time { + return unixEpoch +} + +// MarshalJSON preserves the UnixTime as a JSON number conforming to Unix Timestamp requirements. +// (i.e. the number of seconds since midnight January 1st, 1970 not considering leap seconds.) +func (t UnixTime) MarshalJSON() ([]byte, error) { + buffer := &bytes.Buffer{} + enc := json.NewEncoder(buffer) + err := enc.Encode(float64(time.Time(t).UnixNano()) / 1e9) + if err != nil { + return nil, err + } + return buffer.Bytes(), nil +} + +// UnmarshalJSON reconstitures a UnixTime saved as a JSON number of the number of seconds since +// midnight January 1st, 1970. +func (t *UnixTime) UnmarshalJSON(text []byte) error { + dec := json.NewDecoder(bytes.NewReader(text)) + + var secondsSinceEpoch float64 + if err := dec.Decode(&secondsSinceEpoch); err != nil { + return err + } + + *t = NewUnixTimeFromSeconds(secondsSinceEpoch) + + return nil +} + +// MarshalText stores the number of seconds since the Unix Epoch as a textual floating point number. +func (t UnixTime) MarshalText() ([]byte, error) { + cast := time.Time(t) + return cast.MarshalText() +} + +// UnmarshalText populates a UnixTime with a value stored textually as a floating point number of seconds since the Unix Epoch. +func (t *UnixTime) UnmarshalText(raw []byte) error { + var unmarshaled time.Time + + if err := unmarshaled.UnmarshalText(raw); err != nil { + return err + } + + *t = UnixTime(unmarshaled) + return nil +} + +// MarshalBinary converts a UnixTime into a binary.LittleEndian float64 of nanoseconds since the epoch. +func (t UnixTime) MarshalBinary() ([]byte, error) { + buf := &bytes.Buffer{} + + payload := int64(t.Duration()) + + if err := binary.Write(buf, binary.LittleEndian, &payload); err != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +// UnmarshalBinary converts a from a binary.LittleEndian float64 of nanoseconds since the epoch into a UnixTime. +func (t *UnixTime) UnmarshalBinary(raw []byte) error { + var nanosecondsSinceEpoch int64 + + if err := binary.Read(bytes.NewReader(raw), binary.LittleEndian, &nanosecondsSinceEpoch); err != nil { + return err + } + *t = NewUnixTimeFromNanoseconds(nanosecondsSinceEpoch) + return nil +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go new file mode 100644 index 000000000000..3fa347c00c38 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/unixtime_test.go @@ -0,0 +1,283 @@ +// +build go1.7 + +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "math" + "testing" + "time" +) + +func ExampleUnixTime_MarshalJSON() { + epoch := UnixTime(UnixEpoch()) + text, _ := json.Marshal(epoch) + fmt.Print(string(text)) + // Output: 0 +} + +func ExampleUnixTime_UnmarshalJSON() { + var myTime UnixTime + json.Unmarshal([]byte("1.3e2"), &myTime) + fmt.Printf("%v", time.Time(myTime)) + // Output: 1970-01-01 00:02:10 +0000 UTC +} + +func TestUnixTime_MarshalJSON(t *testing.T) { + testCases := []time.Time{ + UnixEpoch().Add(-1 * time.Second), // One second befote the Unix Epoch + time.Date(2017, time.April, 14, 20, 27, 47, 0, time.UTC), // The time this test was written + UnixEpoch(), + time.Date(1800, 01, 01, 0, 0, 0, 0, time.UTC), + time.Date(2200, 12, 29, 00, 01, 37, 82, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + var actual, expected float64 + var marshaled []byte + + target := UnixTime(tc) + expected = float64(target.Duration().Nanoseconds()) / 1e9 + + if temp, err := json.Marshal(target); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + dec := json.NewDecoder(bytes.NewReader(marshaled)) + if err := dec.Decode(&actual); err != nil { + subT.Error(err) + return + } + + diff := math.Abs(actual - expected) + subT.Logf("\ngot :\t%g\nwant:\t%g\ndiff:\t%g", actual, expected, diff) + if diff > 1e-9 { //Must be within 1 nanosecond of one another + subT.Fail() + } + }) + } +} + +func TestUnixTime_UnmarshalJSON(t *testing.T) { + testCases := []struct { + text string + expected time.Time + }{ + {"1", UnixEpoch().Add(time.Second)}, + {"0", UnixEpoch()}, + {"1492203742", time.Date(2017, time.April, 14, 21, 02, 22, 0, time.UTC)}, // The time this test was written + {"-1", time.Date(1969, time.December, 31, 23, 59, 59, 0, time.UTC)}, + {"1.5", UnixEpoch().Add(1500 * time.Millisecond)}, + {"0e1", UnixEpoch()}, // See http://json.org for 'number' format definition. + {"1.3e+2", UnixEpoch().Add(130 * time.Second)}, + {"1.6E-10", UnixEpoch()}, // This is so small, it should get truncated into the UnixEpoch + {"2E-6", UnixEpoch().Add(2 * time.Microsecond)}, + {"1.289345e9", UnixEpoch().Add(1289345000 * time.Second)}, + {"1e-9", UnixEpoch().Add(time.Nanosecond)}, + } + + for _, tc := range testCases { + t.Run(tc.text, func(subT *testing.T) { + var rehydrated UnixTime + if err := json.Unmarshal([]byte(tc.text), &rehydrated); err != nil { + subT.Error(err) + return + } + + if time.Time(rehydrated) != tc.expected { + subT.Logf("\ngot: \t%v\nwant:\t%v\ndiff:\t%v", time.Time(rehydrated), tc.expected, time.Time(rehydrated).Sub(tc.expected)) + subT.Fail() + } + }) + } +} + +func TestUnixTime_JSONRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + time.Date(2005, time.November, 5, 0, 0, 0, 0, time.UTC), // The day V for Vendetta (film) was released. 
+ UnixEpoch().Add(-6 * time.Second), + UnixEpoch().Add(800 * time.Hour), + UnixEpoch().Add(time.Nanosecond), + time.Date(2015, time.September, 05, 4, 30, 12, 9992, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + subject := UnixTime(tc) + var marshaled []byte + if temp, err := json.Marshal(subject); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled UnixTime + if err := json.Unmarshal(marshaled, &unmarshaled); err != nil { + subT.Error(err) + } + + actual := time.Time(unmarshaled) + diff := actual.Sub(tc) + subT.Logf("\ngot :\t%s\nwant:\t%s\ndiff:\t%s", actual.String(), tc.String(), diff.String()) + + if diff > time.Duration(100) { // We lose some precision be working in floats. We shouldn't lose more than 100 nanoseconds. + subT.Fail() + } + }) + } +} + +func TestUnixTime_MarshalBinary(t *testing.T) { + testCases := []struct { + expected int64 + subject time.Time + }{ + {0, UnixEpoch()}, + {-15 * int64(time.Second), UnixEpoch().Add(-15 * time.Second)}, + {54, UnixEpoch().Add(54 * time.Nanosecond)}, + } + + for _, tc := range testCases { + t.Run("", func(subT *testing.T) { + var marshaled []byte + + if temp, err := UnixTime(tc.subject).MarshalBinary(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled int64 + if err := binary.Read(bytes.NewReader(marshaled), binary.LittleEndian, &unmarshaled); err != nil { + subT.Error(err) + return + } + + if unmarshaled != tc.expected { + subT.Logf("\ngot: \t%d\nwant:\t%d", unmarshaled, tc.expected) + subT.Fail() + } + }) + } +} + +func TestUnixTime_BinaryRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(800 * time.Minute), + UnixEpoch().Add(7 * time.Hour), + UnixEpoch().Add(-1 * time.Nanosecond), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + original := UnixTime(tc) + var marshaled []byte + + if temp, err := original.MarshalBinary(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var traveled UnixTime + if err := traveled.UnmarshalBinary(marshaled); err != nil { + subT.Error(err) + return + } + + if traveled != original { + subT.Logf("\ngot: \t%s\nwant:\t%s", time.Time(original).String(), time.Time(traveled).String()) + subT.Fail() + } + }) + } +} + +func TestUnixTime_MarshalText(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(45 * time.Second), + UnixEpoch().Add(time.Nanosecond), + UnixEpoch().Add(-100000 * time.Second), + } + + for _, tc := range testCases { + expected, _ := tc.MarshalText() + t.Run("", func(subT *testing.T) { + var marshaled []byte + + if temp, err := UnixTime(tc).MarshalText(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + if string(marshaled) != string(expected) { + subT.Logf("\ngot: \t%s\nwant:\t%s", string(marshaled), string(expected)) + subT.Fail() + } + }) + } +} + +func TestUnixTime_TextRoundTrip(t *testing.T) { + testCases := []time.Time{ + UnixEpoch(), + UnixEpoch().Add(-1 * time.Nanosecond), + UnixEpoch().Add(1 * time.Nanosecond), + time.Date(2017, time.April, 17, 21, 00, 00, 00, time.UTC), + } + + for _, tc := range testCases { + t.Run(tc.String(), func(subT *testing.T) { + unixTC := UnixTime(tc) + + var marshaled []byte + + if temp, err := unixTC.MarshalText(); err == nil { + marshaled = temp + } else { + subT.Error(err) + return + } + + var unmarshaled UnixTime + if err := unmarshaled.UnmarshalText(marshaled); err != nil { 
+ subT.Error(err) + return + } + + if unmarshaled != unixTC { + t.Logf("\ngot: \t%s\nwant:\t%s", time.Time(unmarshaled).String(), tc.String()) + t.Fail() + } + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/date/utility.go b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go new file mode 100644 index 000000000000..12addf0ebb4b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/date/utility.go @@ -0,0 +1,25 @@ +package date + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "strings" + "time" +) + +// ParseTime to parse Time string to specified format. +func ParseTime(format string, t string) (d time.Time, err error) { + return time.Parse(format, strings.ToUpper(t)) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error.go b/vendor/github.com/Azure/go-autorest/autorest/error.go new file mode 100644 index 000000000000..f724f33327ed --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error.go @@ -0,0 +1,98 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "net/http" +) + +const ( + // UndefinedStatusCode is used when HTTP status code is not available for an error. + UndefinedStatusCode = 0 +) + +// DetailedError encloses a error with details of the package, method, and associated HTTP +// status code (if any). +type DetailedError struct { + Original error + + // PackageType is the package type of the object emitting the error. For types, the value + // matches that produced the the '%T' format specifier of the fmt package. For other elements, + // such as functions, it is just the package name (e.g., "autorest"). + PackageType string + + // Method is the name of the method raising the error. + Method string + + // StatusCode is the HTTP Response StatusCode (if non-zero) that led to the error. + StatusCode interface{} + + // Message is the error message. + Message string + + // Service Error is the response body of failed API in bytes + ServiceError []byte + + // Response is the response object that was returned during failure if applicable. + Response *http.Response +} + +// NewError creates a new Error conforming object from the passed packageType, method, and +// message. message is treated as a format string to which the optional args apply. 
+func NewError(packageType string, method string, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, nil, message, args...) +} + +// NewErrorWithResponse creates a new Error conforming object from the passed +// packageType, method, statusCode of the given resp (UndefinedStatusCode if +// resp is nil), and message. message is treated as a format string to which the +// optional args apply. +func NewErrorWithResponse(packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + return NewErrorWithError(nil, packageType, method, resp, message, args...) +} + +// NewErrorWithError creates a new Error conforming object from the +// passed packageType, method, statusCode of the given resp (UndefinedStatusCode +// if resp is nil), message, and original error. message is treated as a format +// string to which the optional args apply. +func NewErrorWithError(original error, packageType string, method string, resp *http.Response, message string, args ...interface{}) DetailedError { + if v, ok := original.(DetailedError); ok { + return v + } + + statusCode := UndefinedStatusCode + if resp != nil { + statusCode = resp.StatusCode + } + + return DetailedError{ + Original: original, + PackageType: packageType, + Method: method, + StatusCode: statusCode, + Message: fmt.Sprintf(message, args...), + Response: resp, + } +} + +// Error returns a formatted containing all available details (i.e., PackageType, Method, +// StatusCode, Message, and original error (if any)). +func (e DetailedError) Error() string { + if e.Original == nil { + return fmt.Sprintf("%s#%s: %s: StatusCode=%d", e.PackageType, e.Method, e.Message, e.StatusCode) + } + return fmt.Sprintf("%s#%s: %s: StatusCode=%d -- Original Error: %v", e.PackageType, e.Method, e.Message, e.StatusCode, e.Original) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/error_test.go b/vendor/github.com/Azure/go-autorest/autorest/error_test.go new file mode 100644 index 000000000000..f4f5d75ef582 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/error_test.go @@ -0,0 +1,202 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
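To show how the error helpers above compose, here is an illustrative sketch (not part of the vendored files; the package names and method names in it are made up): NewErrorWithError records the original error and the response status code, DetailedError.Error folds both into the formatted message, and wrapping an existing DetailedError is a no-op so context is never stacked twice. The tests that follow assert the same behaviour.

package main

import (
	"errors"
	"fmt"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

func main() {
	resp := &http.Response{
		StatusCode: http.StatusBadRequest,
		Status:     http.StatusText(http.StatusBadRequest),
	}

	// Wrap a low-level failure with package, method, and response context.
	err := autorest.NewErrorWithError(errors.New("connection reset"),
		"examplepkg", "FetchWidget", resp, "listing widgets for %q", "tenant-a")

	// The formatted message includes the status code and the original error:
	// examplepkg#FetchWidget: listing widgets for "tenant-a": StatusCode=400 -- Original Error: connection reset
	fmt.Println(err.Error())

	// Re-wrapping a DetailedError returns it unchanged.
	again := autorest.NewErrorWithError(err, "otherpkg", "OtherMethod", nil, "ignored")
	fmt.Println(again.PackageType) // examplepkg
}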
+ +import ( + "fmt" + "net/http" + "reflect" + "regexp" + "testing" +) + +func TestNewErrorWithError_AssignsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.PackageType != "packageType" { + t.Fatalf("autorest: Error failed to set package type -- expected %v, received %v", "packageType", e.PackageType) + } +} + +func TestNewErrorWithError_AssignsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Method != "method" { + t.Fatalf("autorest: Error failed to set method -- expected %v, received %v", "method", e.Method) + } +} + +func TestNewErrorWithError_AssignsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if e.Message != "message" { + t.Fatalf("autorest: Error failed to set message -- expected %v, received %v", "message", e.Message) + } +} + +func TestNewErrorWithError_AssignsUndefinedStatusCodeIfRespNil(t *testing.T) { + e := NewErrorWithError(nil, "packageType", "method", nil, "message") + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithError_AssignsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithError_AcceptsArgs(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message %s", "arg") + + if matched, _ := regexp.MatchString(`.*arg.*`, e.Message); !matched { + t.Fatalf("autorest: Error failed to apply message arguments -- expected %v, received %v", + `.*arg.*`, e.Message) + } +} + +func TestNewErrorWithError_AssignsError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if e.Original != err { + t.Fatalf("autorest: Error failed to set error -- expected %v, received %v", err, e.Original) + } +} + +func TestNewErrorWithResponse_ContainsStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if e.StatusCode != http.StatusBadRequest { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", http.StatusBadRequest, e.StatusCode) + } +} + +func TestNewErrorWithResponse_nilResponse_ReportsUndefinedStatusCode(t *testing.T) { + e := NewErrorWithResponse("packageType", "method", nil, "message") + + if e.StatusCode != UndefinedStatusCode { + t.Fatalf("autorest: Error failed to set status code -- expected %v, received %v", UndefinedStatusCode, e.StatusCode) + } +} + +func TestNewErrorWithResponse_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + e2 := NewErrorWithResponse("packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_Forwards(t *testing.T) { + e1 := NewError("packageType", "method", "message %s", "arg") + 
e2 := NewErrorWithError(nil, "packageType", "method", nil, "message %s", "arg") + + if !reflect.DeepEqual(e1, e2) { + t.Fatal("autorest: NewError did not return an error equivelent to NewErrorWithError") + } +} + +func TestNewErrorWithError_DoesNotWrapADetailedError(t *testing.T) { + e1 := NewError("packageType1", "method1", "message1 %s", "arg1") + e2 := NewErrorWithError(e1, "packageType2", "method2", nil, "message2 %s", "arg2") + + if !reflect.DeepEqual(e1, e2) { + t.Fatalf("autorest: NewErrorWithError incorrectly wrapped a DetailedError -- expected %v, received %v", e1, e2) + } +} + +func TestNewErrorWithError_WrapsAnError(t *testing.T) { + e1 := fmt.Errorf("Inner Error") + var e2 interface{} = NewErrorWithError(e1, "packageType", "method", nil, "message") + + if _, ok := e2.(DetailedError); !ok { + t.Fatalf("autorest: NewErrorWithError failed to wrap a standard error -- received %T", e2) + } +} + +func TestDetailedError(t *testing.T) { + err := fmt.Errorf("original") + e := NewErrorWithError(err, "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#Error failed to return original error message -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsPackageType(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*packageType.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include PackageType -- expected %v, received %v", + `.*packageType.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMethod(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*method.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Method -- expected %v, received %v", + `.*method.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsMessage(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*message.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Message -- expected %v, received %v", + `.*message.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsStatusCode(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", &http.Response{ + StatusCode: http.StatusBadRequest, + Status: http.StatusText(http.StatusBadRequest)}, "message") + + if matched, _ := regexp.MatchString(`.*400.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Status Code -- expected %v, received %v", + `.*400.*`, e.Error()) + } +} + +func TestDetailedErrorConstainsOriginal(t *testing.T) { + e := NewErrorWithError(fmt.Errorf("original"), "packageType", "method", nil, "message") + + if matched, _ := regexp.MatchString(`.*original.*`, e.Error()); !matched { + t.Fatalf("autorest: Error#String failed to include Original error -- expected %v, received %v", + `.*original.*`, e.Error()) + } +} + +func TestDetailedErrorSkipsOriginal(t *testing.T) { + e := NewError("packageType", "method", "message") + + if matched, _ := regexp.MatchString(`.*Original.*`, e.Error()); matched { + t.Fatalf("autorest: Error#String included missing Original error -- unexpected %v, received %v", + `.*Original.*`, e.Error()) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/helpers.go 
b/vendor/github.com/Azure/go-autorest/autorest/mocks/helpers.go new file mode 100644 index 000000000000..e2aab19c9dc5 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/helpers.go @@ -0,0 +1,158 @@ +package mocks + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io" + "net/http" + "time" +) + +const ( + // TestAuthorizationHeader is a faux HTTP Authorization header value + TestAuthorizationHeader = "BEARER SECRETTOKEN" + + // TestBadURL is a malformed URL + TestBadURL = " " + + // TestDelay is the Retry-After delay used in tests. + TestDelay = 0 * time.Second + + // TestHeader is the header used in tests. + TestHeader = "x-test-header" + + // TestURL is the URL used in tests. + TestURL = "https://microsoft.com/a/b/c/" + + // TestAzureAsyncURL is a URL used in Azure asynchronous tests + TestAzureAsyncURL = "https://microsoft.com/a/b/c/async" + + // TestLocationURL is a URL used in Azure asynchronous tests + TestLocationURL = "https://microsoft.com/a/b/c/location" +) + +const ( + headerLocation = "Location" + headerRetryAfter = "Retry-After" +) + +// NewRequest instantiates a new request. +func NewRequest() *http.Request { + return NewRequestWithContent("") +} + +// NewRequestWithContent instantiates a new request using the passed string for the body content. +func NewRequestWithContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBody(c)) + return r +} + +// NewRequestWithCloseBody instantiates a new request. +func NewRequestWithCloseBody() *http.Request { + return NewRequestWithCloseBodyContent("request body") +} + +// NewRequestWithCloseBodyContent instantiates a new request using the passed string for the body content. +func NewRequestWithCloseBodyContent(c string) *http.Request { + r, _ := http.NewRequest("GET", "https://microsoft.com/a/b/c/", NewBodyClose(c)) + return r +} + +// NewRequestForURL instantiates a new request using the passed URL. +func NewRequestForURL(u string) *http.Request { + return NewRequestWithParams("GET", u, NewBody("")) +} + +// NewRequestWithParams instantiates a new request using the provided parameters. +func NewRequestWithParams(method, u string, body io.Reader) *http.Request { + r, err := http.NewRequest(method, u, body) + if err != nil { + panic(fmt.Sprintf("mocks: ERROR (%v) parsing testing URL %s", err, u)) + } + return r +} + +// NewResponse instantiates a new response. +func NewResponse() *http.Response { + return NewResponseWithContent("") +} + +// NewResponseWithContent instantiates a new response with the passed string as the body content. +func NewResponseWithContent(c string) *http.Response { + return &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Body: NewBody(c), + Request: NewRequest(), + } +} + +// NewResponseWithStatus instantiates a new response using the passed string and integer as the +// status and status code. 
+func NewResponseWithStatus(s string, c int) *http.Response { + resp := NewResponse() + resp.Status = s + resp.StatusCode = c + return resp +} + +// NewResponseWithBodyAndStatus instantiates a new response using the specified mock body, +// status and status code +func NewResponseWithBodyAndStatus(body *Body, c int, s string) *http.Response { + resp := NewResponse() + resp.Body = body + resp.ContentLength = body.Length() + resp.Status = s + resp.StatusCode = c + return resp +} + +// SetResponseHeader adds a header to the passed response. +func SetResponseHeader(resp *http.Response, h string, v string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + resp.Header.Set(h, v) +} + +// SetResponseHeaderValues adds a header containing all the passed string values. +func SetResponseHeaderValues(resp *http.Response, h string, values []string) { + if resp.Header == nil { + resp.Header = make(http.Header) + } + for _, v := range values { + resp.Header.Add(h, v) + } +} + +// SetAcceptedHeaders adds the headers usually associated with a 202 Accepted response. +func SetAcceptedHeaders(resp *http.Response) { + SetLocationHeader(resp, TestURL) + SetRetryHeader(resp, TestDelay) +} + +// SetLocationHeader adds the Location header. +func SetLocationHeader(resp *http.Response, location string) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerLocation), location) +} + +// SetRetryHeader adds the Retry-After header. +func SetRetryHeader(resp *http.Response, delay time.Duration) { + SetResponseHeader(resp, http.CanonicalHeaderKey(headerRetryAfter), fmt.Sprintf("%v", delay.Seconds())) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go b/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go new file mode 100644 index 000000000000..cc1579e0f7ba --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/mocks/mocks.go @@ -0,0 +1,227 @@ +/* +Package mocks provides mocks and helpers used in testing. +*/ +package mocks + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io" + "net/http" + "time" +) + +// Body implements acceptable body over a string. +type Body struct { + s string + b []byte + isOpen bool + closeAttempts int +} + +// NewBody creates a new instance of Body. +func NewBody(s string) *Body { + return (&Body{s: s}).reset() +} + +// NewBodyClose creates a new instance of Body. +func NewBodyClose(s string) *Body { + return &Body{s: s} +} + +// Read reads into the passed byte slice and returns the bytes read. +func (body *Body) Read(b []byte) (n int, err error) { + if !body.IsOpen() { + return 0, fmt.Errorf("ERROR: Body has been closed") + } + if len(body.b) == 0 { + return 0, io.EOF + } + n = copy(b, body.b) + body.b = body.b[n:] + return n, nil +} + +// Close closes the body. +func (body *Body) Close() error { + if body.isOpen { + body.isOpen = false + body.closeAttempts++ + } + return nil +} + +// CloseAttempts returns the number of times Close was called. 
+func (body *Body) CloseAttempts() int { + return body.closeAttempts +} + +// IsOpen returns true if the Body has not been closed, false otherwise. +func (body *Body) IsOpen() bool { + return body.isOpen +} + +func (body *Body) reset() *Body { + body.isOpen = true + body.b = []byte(body.s) + return body +} + +// Length returns the number of bytes in the body. +func (body *Body) Length() int64 { + if body == nil { + return 0 + } + return int64(len(body.b)) +} + +type response struct { + r *http.Response + e error + d time.Duration +} + +// Sender implements a simple null sender. +type Sender struct { + attempts int + responses []response + numResponses int + repeatResponse []int + err error + repeatError int + emitErrorAfter int +} + +// NewSender creates a new instance of Sender. +func NewSender() *Sender { + return &Sender{} +} + +// Do accepts the passed request and, based on settings, emits a response and possible error. +func (c *Sender) Do(r *http.Request) (resp *http.Response, err error) { + c.attempts++ + + if len(c.responses) > 0 { + resp = c.responses[0].r + if resp != nil { + if b, ok := resp.Body.(*Body); ok { + b.reset() + } + } else { + err = c.responses[0].e + } + time.Sleep(c.responses[0].d) + c.repeatResponse[0]-- + if c.repeatResponse[0] == 0 { + c.responses = c.responses[1:] + c.repeatResponse = c.repeatResponse[1:] + } + } else { + resp = NewResponse() + } + if resp != nil { + resp.Request = r + } + + if c.emitErrorAfter > 0 { + c.emitErrorAfter-- + } else if c.err != nil { + err = c.err + c.repeatError-- + if c.repeatError == 0 { + c.err = nil + } + } + + return +} + +// AppendResponse adds the passed http.Response to the response stack. +func (c *Sender) AppendResponse(resp *http.Response) { + c.AppendAndRepeatResponse(resp, 1) +} + +// AppendResponseWithDelay adds the passed http.Response to the response stack with the specified delay. +func (c *Sender) AppendResponseWithDelay(resp *http.Response, delay time.Duration) { + c.AppendAndRepeatResponseWithDelay(resp, delay, 1) +} + +// AppendAndRepeatResponse adds the passed http.Response to the response stack along with a +// repeat count. A negative repeat count will return the response for all remaining calls to Do. +func (c *Sender) AppendAndRepeatResponse(resp *http.Response, repeat int) { + c.appendAndRepeat(response{r: resp}, repeat) +} + +// AppendAndRepeatResponseWithDelay adds the passed http.Response to the response stack with the specified +// delay along with a repeat count. A negative repeat count will return the response for all remaining calls to Do. +func (c *Sender) AppendAndRepeatResponseWithDelay(resp *http.Response, delay time.Duration, repeat int) { + c.appendAndRepeat(response{r: resp, d: delay}, repeat) +} + +// AppendError adds the passed error to the response stack. +func (c *Sender) AppendError(err error) { + c.AppendAndRepeatError(err, 1) +} + +// AppendAndRepeatError adds the passed error to the response stack along with a repeat +// count. A negative repeat count will return the response for all remaining calls to Do. +func (c *Sender) AppendAndRepeatError(err error, repeat int) { + c.appendAndRepeat(response{e: err}, repeat) +} + +func (c *Sender) appendAndRepeat(resp response, repeat int) { + if c.responses == nil { + c.responses = []response{resp} + c.repeatResponse = []int{repeat} + } else { + c.responses = append(c.responses, resp) + c.repeatResponse = append(c.repeatResponse, repeat) + } + c.numResponses++ +} + +// Attempts returns the number of times Do was called. 
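// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A small test, assumed to live in a _test.go file, showing how the Sender above can
// stub a failure followed by success, e.g. when exercising retry logic.
package mocks_example

import (
	"net/http"
	"testing"

	"github.com/Azure/go-autorest/autorest/mocks"
)

func TestSenderStubbing(t *testing.T) {
	s := mocks.NewSender()
	// The first call returns a 500; every later call returns a 200.
	s.AppendResponse(mocks.NewResponseWithStatus("500 Internal Server Error", http.StatusInternalServerError))
	s.AppendAndRepeatResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK), -1)

	first, err := s.Do(mocks.NewRequest())
	if err != nil {
		t.Fatal(err)
	}
	second, err := s.Do(mocks.NewRequest())
	if err != nil {
		t.Fatal(err)
	}

	if first.StatusCode != 500 || second.StatusCode != 200 {
		t.Fatalf("unexpected status codes: %d then %d", first.StatusCode, second.StatusCode)
	}
	if s.Attempts() != 2 {
		t.Fatalf("expected 2 attempts, got %d", s.Attempts())
	}
}
// --- end of sketch ---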
+func (c *Sender) Attempts() int { + return c.attempts +} + +// SetError sets the error Do should return. +func (c *Sender) SetError(err error) { + c.SetAndRepeatError(err, 1) +} + +// SetAndRepeatError sets the error Do should return and how many calls to Do will return the error. +// A negative repeat value will return the error for all remaining calls to Do. +func (c *Sender) SetAndRepeatError(err error, repeat int) { + c.err = err + c.repeatError = repeat +} + +// SetEmitErrorAfter sets the number of attempts to be made before errors are emitted. +func (c *Sender) SetEmitErrorAfter(ea int) { + c.emitErrorAfter = ea +} + +// NumResponses returns the number of responses that have been added to the sender. +func (c *Sender) NumResponses() int { + return c.numResponses +} + +// T is a simple testing struct. +type T struct { + Name string `json:"name" xml:"Name"` + Age int `json:"age" xml:"Age"` +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer.go b/vendor/github.com/Azure/go-autorest/autorest/preparer.go new file mode 100644 index 000000000000..6d67bd7337b7 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer.go @@ -0,0 +1,480 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "mime/multipart" + "net/http" + "net/url" + "strings" +) + +const ( + mimeTypeJSON = "application/json" + mimeTypeOctetStream = "application/octet-stream" + mimeTypeFormPost = "application/x-www-form-urlencoded" + + headerAuthorization = "Authorization" + headerContentType = "Content-Type" + headerUserAgent = "User-Agent" +) + +// Preparer is the interface that wraps the Prepare method. +// +// Prepare accepts and possibly modifies an http.Request (e.g., adding Headers). Implementations +// must ensure to not share or hold per-invocation state since Preparers may be shared and re-used. +type Preparer interface { + Prepare(*http.Request) (*http.Request, error) +} + +// PreparerFunc is a method that implements the Preparer interface. +type PreparerFunc func(*http.Request) (*http.Request, error) + +// Prepare implements the Preparer interface on PreparerFunc. +func (pf PreparerFunc) Prepare(r *http.Request) (*http.Request, error) { + return pf(r) +} + +// PrepareDecorator takes and possibly decorates, by wrapping, a Preparer. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then affect the result. +type PrepareDecorator func(Preparer) Preparer + +// CreatePreparer creates, decorates, and returns a Preparer. +// Without decorators, the returned Preparer returns the passed http.Request unmodified. +// Preparers are safe to share and re-use. +func CreatePreparer(decorators ...PrepareDecorator) Preparer { + return DecoratePreparer( + Preparer(PreparerFunc(func(r *http.Request) (*http.Request, error) { return r, nil })), + decorators...) 
+} + +// DecoratePreparer accepts a Preparer and a, possibly empty, set of PrepareDecorators, which it +// applies to the Preparer. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (change the http.Request and then pass it +// along) or a post-decorator (pass the http.Request along and alter it on return). +func DecoratePreparer(p Preparer, decorators ...PrepareDecorator) Preparer { + for _, decorate := range decorators { + p = decorate(p) + } + return p +} + +// Prepare accepts an http.Request and a, possibly empty, set of PrepareDecorators. +// It creates a Preparer from the decorators which it then applies to the passed http.Request. +func Prepare(r *http.Request, decorators ...PrepareDecorator) (*http.Request, error) { + if r == nil { + return nil, NewError("autorest", "Prepare", "Invoked without an http.Request") + } + return CreatePreparer(decorators...).Prepare(r) +} + +// WithNothing returns a "do nothing" PrepareDecorator that makes no changes to the passed +// http.Request. +func WithNothing() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return p.Prepare(r) + }) + } +} + +// WithHeader returns a PrepareDecorator that sets the specified HTTP header of the http.Request to +// the passed value. It canonicalizes the passed header name (via http.CanonicalHeaderKey) before +// adding the header. +func WithHeader(header string, value string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(header), value) + } + return r, err + }) + } +} + +// WithHeaders returns a PrepareDecorator that sets the specified HTTP headers of the http.Request to +// the passed value. It canonicalizes the passed headers name (via http.CanonicalHeaderKey) before +// adding them. +func WithHeaders(headers map[string]interface{}) PrepareDecorator { + h := ensureValueStrings(headers) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.Header == nil { + r.Header = make(http.Header) + } + + for name, value := range h { + r.Header.Set(http.CanonicalHeaderKey(name), value) + } + } + return r, err + }) + } +} + +// WithBearerAuthorization returns a PrepareDecorator that adds an HTTP Authorization header whose +// value is "Bearer " followed by the supplied token. +func WithBearerAuthorization(token string) PrepareDecorator { + return WithHeader(headerAuthorization, fmt.Sprintf("Bearer %s", token)) +} + +// AsContentType returns a PrepareDecorator that adds an HTTP Content-Type header whose value +// is the passed contentType. +func AsContentType(contentType string) PrepareDecorator { + return WithHeader(headerContentType, contentType) +} + +// WithUserAgent returns a PrepareDecorator that adds an HTTP User-Agent header whose value is the +// passed string. +func WithUserAgent(ua string) PrepareDecorator { + return WithHeader(headerUserAgent, ua) +} + +// AsFormURLEncoded returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/x-www-form-urlencoded". 
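// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// One way the header decorators above might be combined. The package name, function
// name, and header values are assumptions for illustration only.
package autorest_example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// prepareAuthorizedRequest decorates a request with auth and identification headers.
func prepareAuthorizedRequest(token, userAgent string) (*http.Request, error) {
	return autorest.Prepare(&http.Request{},
		autorest.WithBearerAuthorization(token),
		autorest.WithUserAgent(userAgent),
		autorest.WithHeader("x-example-header", "true"),
	)
}
// --- end of sketch ---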
+func AsFormURLEncoded() PrepareDecorator { + return AsContentType(mimeTypeFormPost) +} + +// AsJSON returns a PrepareDecorator that adds an HTTP Content-Type header whose value is +// "application/json". +func AsJSON() PrepareDecorator { + return AsContentType(mimeTypeJSON) +} + +// AsOctetStream returns a PrepareDecorator that adds the "application/octet-stream" Content-Type header. +func AsOctetStream() PrepareDecorator { + return AsContentType(mimeTypeOctetStream) +} + +// WithMethod returns a PrepareDecorator that sets the HTTP method of the passed request. The +// decorator does not validate that the passed method string is a known HTTP method. +func WithMethod(method string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Method = method + return p.Prepare(r) + }) + } +} + +// AsDelete returns a PrepareDecorator that sets the HTTP method to DELETE. +func AsDelete() PrepareDecorator { return WithMethod("DELETE") } + +// AsGet returns a PrepareDecorator that sets the HTTP method to GET. +func AsGet() PrepareDecorator { return WithMethod("GET") } + +// AsHead returns a PrepareDecorator that sets the HTTP method to HEAD. +func AsHead() PrepareDecorator { return WithMethod("HEAD") } + +// AsOptions returns a PrepareDecorator that sets the HTTP method to OPTIONS. +func AsOptions() PrepareDecorator { return WithMethod("OPTIONS") } + +// AsPatch returns a PrepareDecorator that sets the HTTP method to PATCH. +func AsPatch() PrepareDecorator { return WithMethod("PATCH") } + +// AsPost returns a PrepareDecorator that sets the HTTP method to POST. +func AsPost() PrepareDecorator { return WithMethod("POST") } + +// AsPut returns a PrepareDecorator that sets the HTTP method to PUT. +func AsPut() PrepareDecorator { return WithMethod("PUT") } + +// WithBaseURL returns a PrepareDecorator that populates the http.Request with a url.URL constructed +// from the supplied baseUrl. +func WithBaseURL(baseURL string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var u *url.URL + if u, err = url.Parse(baseURL); err != nil { + return r, err + } + if u.Scheme == "" { + err = fmt.Errorf("autorest: No scheme detected in URL %s", baseURL) + } + if err == nil { + r.URL = u + } + } + return r, err + }) + } +} + +// WithCustomBaseURL returns a PrepareDecorator that replaces brace-enclosed keys within the +// request base URL (i.e., http.Request.URL) with the corresponding values from the passed map. +func WithCustomBaseURL(baseURL string, urlParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(urlParameters) + for key, value := range parameters { + baseURL = strings.Replace(baseURL, "{"+key+"}", value, -1) + } + return WithBaseURL(baseURL) +} + +// WithFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) into the +// http.Request body. 
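// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A sketch combining the method and URL decorators above into a POST request for a
// hypothetical endpoint; the URL and path are assumptions for illustration only.
package autorest_example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// newCreateItemRequest builds "POST https://example.com/items" with a JSON Content-Type.
func newCreateItemRequest() (*http.Request, error) {
	return autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.AsJSON(),
		autorest.WithBaseURL("https://example.com"),
		autorest.WithPath("items"),
	)
}
// --- end of sketch ---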
+func WithFormData(v url.Values) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + s := v.Encode() + + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), mimeTypeFormPost) + r.ContentLength = int64(len(s)) + r.Body = ioutil.NopCloser(strings.NewReader(s)) + } + return r, err + }) + } +} + +// WithMultiPartFormData returns a PrepareDecoratore that "URL encodes" (e.g., bar=baz&foo=quux) form parameters +// into the http.Request body. +func WithMultiPartFormData(formDataParameters map[string]interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + var body bytes.Buffer + writer := multipart.NewWriter(&body) + for key, value := range formDataParameters { + if rc, ok := value.(io.ReadCloser); ok { + var fd io.Writer + if fd, err = writer.CreateFormFile(key, key); err != nil { + return r, err + } + if _, err = io.Copy(fd, rc); err != nil { + return r, err + } + } else { + if err = writer.WriteField(key, ensureValueString(value)); err != nil { + return r, err + } + } + } + if err = writer.Close(); err != nil { + return r, err + } + if r.Header == nil { + r.Header = make(http.Header) + } + r.Header.Set(http.CanonicalHeaderKey(headerContentType), writer.FormDataContentType()) + r.Body = ioutil.NopCloser(bytes.NewReader(body.Bytes())) + r.ContentLength = int64(body.Len()) + return r, err + } + return r, err + }) + } +} + +// WithFile returns a PrepareDecorator that sends file in request body. +func WithFile(f io.ReadCloser) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := ioutil.ReadAll(f) + if err != nil { + return r, err + } + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + r.ContentLength = int64(len(b)) + } + return r, err + }) + } +} + +// WithBool returns a PrepareDecorator that encodes the passed bool into the body of the request +// and sets the Content-Length header. +func WithBool(v bool) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat32 returns a PrepareDecorator that encodes the passed float32 into the body of the +// request and sets the Content-Length header. +func WithFloat32(v float32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithFloat64 returns a PrepareDecorator that encodes the passed float64 into the body of the +// request and sets the Content-Length header. +func WithFloat64(v float64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt32 returns a PrepareDecorator that encodes the passed int32 into the body of the request +// and sets the Content-Length header. +func WithInt32(v int32) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithInt64 returns a PrepareDecorator that encodes the passed int64 into the body of the request +// and sets the Content-Length header. +func WithInt64(v int64) PrepareDecorator { + return WithString(fmt.Sprintf("%v", v)) +} + +// WithString returns a PrepareDecorator that encodes the passed string into the body of the request +// and sets the Content-Length header. 
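// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A sketch of a multipart upload built with WithMultiPartFormData; the endpoint, field
// names, and payload are assumptions for illustration only.
package autorest_example

import (
	"io/ioutil"
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/autorest"
)

// newUploadRequest sends one plain field and one file-style field as multipart form data.
func newUploadRequest() (*http.Request, error) {
	fields := map[string]interface{}{
		"description": "example upload",
		"file":        ioutil.NopCloser(strings.NewReader("payload bytes")),
	}
	return autorest.Prepare(&http.Request{},
		autorest.AsPost(),
		autorest.WithBaseURL("https://example.com/upload"),
		autorest.WithMultiPartFormData(fields),
	)
}
// --- end of sketch ---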
+func WithString(v string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + r.ContentLength = int64(len(v)) + r.Body = ioutil.NopCloser(strings.NewReader(v)) + } + return r, err + }) + } +} + +// WithJSON returns a PrepareDecorator that encodes the data passed as JSON into the body of the +// request and sets the Content-Length header. +func WithJSON(v interface{}) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + b, err := json.Marshal(v) + if err == nil { + r.ContentLength = int64(len(b)) + r.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + } + return r, err + }) + } +} + +// WithPath returns a PrepareDecorator that adds the supplied path to the request URL. If the path +// is absolute (that is, it begins with a "/"), it replaces the existing path. +func WithPath(path string) PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPath", "Invoked with a nil URL") + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithEscapedPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. The +// values will be escaped (aka URL encoded) before insertion into the path. +func WithEscapedPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := escapeValueStrings(ensureValueStrings(pathParameters)) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithEscapedPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +// WithPathParameters returns a PrepareDecorator that replaces brace-enclosed keys within the +// request path (i.e., http.Request.URL.Path) with the corresponding values from the passed map. +func WithPathParameters(path string, pathParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(pathParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithPathParameters", "Invoked with a nil URL") + } + for key, value := range parameters { + path = strings.Replace(path, "{"+key+"}", value, -1) + } + + if r.URL, err = parseURL(r.URL, path); err != nil { + return r, err + } + } + return r, err + }) + } +} + +func parseURL(u *url.URL, path string) (*url.URL, error) { + p := strings.TrimRight(u.String(), "/") + if !strings.HasPrefix(path, "/") { + path = "/" + path + } + return url.Parse(p + path) +} + +// WithQueryParameters returns a PrepareDecorators that encodes and applies the query parameters +// given in the supplied map (i.e., key=value). 
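// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A sketch combining the path and query parameter decorators; the endpoint and
// parameter names are assumptions for illustration only.
package autorest_example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// newGetUserRequest builds "GET https://example.com/users/{id}?expand=details".
func newGetUserRequest(userID string) (*http.Request, error) {
	return autorest.Prepare(&http.Request{},
		autorest.AsGet(),
		autorest.WithBaseURL("https://example.com"),
		autorest.WithPathParameters("/users/{id}", map[string]interface{}{"id": userID}),
		autorest.WithQueryParameters(map[string]interface{}{"expand": "details"}),
	)
}
// --- end of sketch ---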
+func WithQueryParameters(queryParameters map[string]interface{}) PrepareDecorator { + parameters := ensureValueStrings(queryParameters) + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, NewError("autorest", "WithQueryParameters", "Invoked with a nil URL") + } + + v := r.URL.Query() + for key, value := range parameters { + d, err := url.QueryUnescape(value) + if err != nil { + return r, err + } + v.Add(key, d) + } + r.URL.RawQuery = v.Encode() + } + return r, err + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go new file mode 100644 index 000000000000..b4d19967b1f6 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/preparer_test.go @@ -0,0 +1,802 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io/ioutil" + "net/http" + "net/url" + "reflect" + "strconv" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +// PrepareDecorators wrap and invoke a Preparer. Most often, the decorator invokes the passed +// Preparer and decorates the response. +func ExamplePrepareDecorator() { + path := "a/b/c/" + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r, err := p.Prepare(r) + if err == nil { + if r.URL == nil { + return r, fmt.Errorf("ERROR: URL is not set") + } + r.URL.Path += path + } + return r, err + }) + } + } + + r, _ := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + pd()) + + fmt.Printf("Path is %s\n", r.URL) + // Output: Path is https://microsoft.com/a/b/c/ +} + +// PrepareDecorators may also modify and then invoke the Preparer. +func ExamplePrepareDecorator_pre() { + pd := func() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + r.Header.Add(http.CanonicalHeaderKey("ContentType"), "application/json") + return p.Prepare(r) + }) + } + } + + r, _ := Prepare(&http.Request{Header: http.Header{}}, + pd()) + + fmt.Printf("ContentType is %s\n", r.Header.Get("ContentType")) + // Output: ContentType is application/json +} + +// Create a sequence of three Preparers that build up the URL path. 
+func ExampleCreatePreparer() { + p := CreatePreparer( + WithBaseURL("https://microsoft.com/"), + WithPath("a"), + WithPath("b"), + WithPath("c")) + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c +} + +// Create and apply separate Preparers +func ExampleCreatePreparer_multiple() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p1 := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p2 := CreatePreparer(WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p1.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + r, err = p2.Prepare(r) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and chain separate Preparers +func ExampleCreatePreparer_chain() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + + p := CreatePreparer(WithBaseURL("https://microsoft.com/")) + p = DecoratePreparer(p, WithPathParameters("/{param1}/b/{param2}/", params)) + + r, err := p.Prepare(&http.Request{}) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create and prepare an http.Request in one call +func ExamplePrepare() { + r, err := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/"), + WithPath("a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("%s %s", r.Method, r.URL) + } + // Output: GET https://microsoft.com/a/b/c/ +} + +// Create a request for a supplied base URL and path +func ExampleWithBaseURL() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +func ExampleWithBaseURL_second() { + _, err := Prepare(&http.Request{}, WithBaseURL(":")) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + +func ExampleWithCustomBaseURL() { + r, err := Prepare(&http.Request{}, + WithCustomBaseURL("https://{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": "myaccount", + "service": "blob", + })) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://myaccount.blob.core.windows.net/ +} + +func ExampleWithCustomBaseURL_second() { + _, err := Prepare(&http.Request{}, + WithCustomBaseURL(":", map[string]interface{}{})) + fmt.Println(err) + // Output: parse :: missing protocol scheme +} + +// Create a request with a custom HTTP header +func ExampleWithHeader() { + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/a/b/c/"), + WithHeader("x-foo", "bar")) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Header %s=%s\n", "x-foo", r.Header.Get("x-foo")) + } + // Output: Header x-foo=bar +} + +// Create a request whose Body is the JSON encoding of a structure +func ExampleWithFormData() { + v := url.Values{} + v.Add("name", "Rob Pike") + v.Add("age", "42") + + r, err := Prepare(&http.Request{}, + WithFormData(v)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Request Body contains %s\n", string(b)) + } + // Output: Request Body contains 
age=42&name=Rob+Pike +} + +// Create a request whose Body is the JSON encoding of a structure +func ExampleWithJSON() { + t := mocks.T{Name: "Rob Pike", Age: 42} + + r, err := Prepare(&http.Request{}, + WithJSON(&t)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Printf("Request Body contains %s\n", string(b)) + } + // Output: Request Body contains {"name":"Rob Pike","age":42} +} + +// Create a request from a path with escaped parameters +func ExampleWithEscapedPathParameters() { + params := map[string]interface{}{ + "param1": "a b c", + "param2": "d e f", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithEscapedPathParameters("/{param1}/b/{param2}/", params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a+b+c/b/d+e+f/ +} + +// Create a request from a path with parameters +func ExampleWithPathParameters() { + params := map[string]interface{}{ + "param1": "a", + "param2": "c", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPathParameters("/{param1}/b/{param2}/", params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/ +} + +// Create a request with query parameters +func ExampleWithQueryParameters() { + params := map[string]interface{}{ + "q1": "value1", + "q2": "value2", + } + r, err := Prepare(&http.Request{}, + WithBaseURL("https://microsoft.com/"), + WithPath("/a/b/c/"), + WithQueryParameters(params)) + if err != nil { + fmt.Printf("ERROR: %v\n", err) + } else { + fmt.Println(r.URL) + } + // Output: https://microsoft.com/a/b/c/?q1=value1&q2=value2 +} + +func TestWithCustomBaseURL(t *testing.T) { + r, err := Prepare(&http.Request{}, WithCustomBaseURL("https://{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": "myaccount", + "service": "blob", + })) + if err != nil { + t.Fatalf("autorest: WithCustomBaseURL should not fail") + } + if r.URL.String() != "https://myaccount.blob.core.windows.net/" { + t.Fatalf("autorest: WithCustomBaseURL expected https://myaccount.blob.core.windows.net/, got %s", r.URL) + } +} + +func TestWithCustomBaseURLwithInvalidURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithCustomBaseURL("hello/{account}.{service}.core.windows.net/", + map[string]interface{}{ + "account": "myaccount", + "service": "blob", + })) + if err == nil { + t.Fatalf("autorest: WithCustomBaseURL should fail fo URL parse error") + } +} + +func TestWithPathWithInvalidPath(t *testing.T) { + p := "path%2*end" + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPath(p)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape error for path '%v' ", p) + } + +} + +func TestWithPathParametersWithInvalidPath(t *testing.T) { + p := "path%2*end" + m := map[string]interface{}{ + "path1": p, + } + if _, err := Prepare(&http.Request{}, WithBaseURL("https://microsoft.com/"), WithPathParameters("/{path1}/", m)); err == nil { + t.Fatalf("autorest: WithPath should fail for invalid URL escape for path '%v' ", p) + } + +} + +func TestCreatePreparerDoesNotModify(t *testing.T) { + r1 := &http.Request{} + p := CreatePreparer() + r2, err := p.Prepare(r1) + if err != nil { + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + 
t.Fatalf("autorest: CreatePreparer without decorators modified the request") + } +} + +func TestCreatePreparerRunsDecoratorsInOrder(t *testing.T) { + p := CreatePreparer(WithBaseURL("https://microsoft.com/"), WithPath("1"), WithPath("2"), WithPath("3")) + r, err := p.Prepare(&http.Request{}) + if err != nil { + t.Fatalf("autorest: CreatePreparer failed (%v)", err) + } + if r.URL.String() != "https:/1/2/3" && r.URL.Host != "microsoft.com" { + t.Fatalf("autorest: CreatePreparer failed to run decorators in order") + } +} + +func TestAsContentType(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsContentType("application/text")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != "application/text" { + t.Fatalf("autorest: AsContentType failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsFormURLEncoded(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsFormURLEncoded()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeFormPost { + t.Fatalf("autorest: AsFormURLEncoded failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestAsJSON(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), AsJSON()) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerContentType) != mimeTypeJSON { + t.Fatalf("autorest: AsJSON failed to add header (%s=%s)", headerContentType, r.Header.Get(headerContentType)) + } +} + +func TestWithNothing(t *testing.T) { + r1 := mocks.NewRequest() + r2, err := Prepare(r1, WithNothing()) + if err != nil { + t.Fatalf("autorest: WithNothing returned an unexpected error (%v)", err) + } + + if !reflect.DeepEqual(r1, r2) { + t.Fatal("azure: WithNothing modified the passed HTTP Request") + } +} + +func TestWithBearerAuthorization(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithBearerAuthorization("SOME-TOKEN")) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.Header.Get(headerAuthorization) != "Bearer SOME-TOKEN" { + t.Fatalf("autorest: WithBearerAuthorization failed to add header (%s=%s)", headerAuthorization, r.Header.Get(headerAuthorization)) + } +} + +func TestWithUserAgent(t *testing.T) { + ua := "User Agent Go" + r, err := Prepare(mocks.NewRequest(), WithUserAgent(ua)) + if err != nil { + fmt.Printf("ERROR: %v", err) + } + if r.UserAgent() != ua || r.Header.Get(headerUserAgent) != ua { + t.Fatalf("autorest: WithUserAgent failed to add header (%s=%s)", headerUserAgent, r.Header.Get(headerUserAgent)) + } +} + +func TestWithMethod(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), WithMethod("HEAD")) + if r.Method != "HEAD" { + t.Fatal("autorest: WithMethod failed to set HTTP method header") + } +} + +func TestAsDelete(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsDelete()) + if r.Method != "DELETE" { + t.Fatal("autorest: AsDelete failed to set HTTP method header to DELETE") + } +} + +func TestAsGet(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsGet()) + if r.Method != "GET" { + t.Fatal("autorest: AsGet failed to set HTTP method header to GET") + } +} + +func TestAsHead(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsHead()) + if r.Method != "HEAD" { + t.Fatal("autorest: AsHead failed to set HTTP method header to HEAD") + } +} + +func TestAsOptions(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsOptions()) + if r.Method != "OPTIONS" { + t.Fatal("autorest: AsOptions failed to set HTTP method header to 
OPTIONS") + } +} + +func TestAsPatch(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPatch()) + if r.Method != "PATCH" { + t.Fatal("autorest: AsPatch failed to set HTTP method header to PATCH") + } +} + +func TestAsPost(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPost()) + if r.Method != "POST" { + t.Fatal("autorest: AsPost failed to set HTTP method header to POST") + } +} + +func TestAsPut(t *testing.T) { + r, _ := Prepare(mocks.NewRequest(), AsPut()) + if r.Method != "PUT" { + t.Fatal("autorest: AsPut failed to set HTTP method header to PUT") + } +} + +func TestPrepareWithNullRequest(t *testing.T) { + _, err := Prepare(nil) + if err == nil { + t.Fatal("autorest: Prepare failed to return an error when given a null http.Request") + } +} + +func TestWithFormData(t *testing.T) { + v := url.Values{} + v.Add("name", "Rob Pike") + v.Add("age", "42") + + r, err := Prepare(&http.Request{}, + WithFormData(v)) + if err != nil { + t.Fatalf("autorest: WithFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFormData failed with error (%v)", err) + } + + expected := "name=Rob+Pike&age=42" + if !(string(b) == "name=Rob+Pike&age=42" || string(b) == "age=42&name=Rob+Pike") { + t.Fatalf("autorest:WithFormData failed to return correct string got (%v), expected (%v)", string(b), expected) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } + + if expected, got := r.Header.Get(http.CanonicalHeaderKey(headerContentType)), mimeTypeFormPost; expected != got { + t.Fatalf("autorest:WithFormData Content Type not set or set to wrong value. Expected %v and got %v", expected, got) + } +} + +func TestWithMultiPartFormDataSetsContentLength(t *testing.T) { + v := map[string]interface{}{ + "file": ioutil.NopCloser(strings.NewReader("Hello Gopher")), + "age": "42", + } + + r, err := Prepare(&http.Request{}, + WithMultiPartFormData(v)) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithMultiPartFormDataWithNoFile(t *testing.T) { + v := map[string]interface{}{ + "file": "no file", + "age": "42", + } + + r, err := Prepare(&http.Request{}, + WithMultiPartFormData(v)) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithMultiPartFormData failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithMultiPartFormData set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithFile(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFile(ioutil.NopCloser(strings.NewReader("Hello Gopher")))) + if err != nil { + t.Fatalf("autorest: WithFile failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFile failed with error (%v)", err) + } + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithFile set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithBool_SetsTheBody(t *testing.T) { + r, err := 
Prepare(&http.Request{}, + WithBool(false)) + if err != nil { + t.Fatalf("autorest: WithBool failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithBool failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", false))) { + t.Fatalf("autorest: WithBool set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", false)))) + } + + v, err := strconv.ParseBool(string(s)) + if err != nil || v { + t.Fatalf("autorest: WithBool incorrectly encoded the boolean as %v", s) + } +} + +func TestWithFloat32_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFloat32(42.0)) + if err != nil { + t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFloat32 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { + t.Fatalf("autorest: WithFloat32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) + } + + v, err := strconv.ParseFloat(string(s), 32) + if err != nil || float32(v) != float32(42.0) { + t.Fatalf("autorest: WithFloat32 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithFloat64_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithFloat64(42.0)) + if err != nil { + t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithFloat64 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42.0))) { + t.Fatalf("autorest: WithFloat64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42.0)))) + } + + v, err := strconv.ParseFloat(string(s), 64) + if err != nil || v != float64(42.0) { + t.Fatalf("autorest: WithFloat64 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithInt32_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithInt32(42)) + if err != nil { + t.Fatalf("autorest: WithInt32 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithInt32 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { + t.Fatalf("autorest: WithInt32 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) + } + + v, err := strconv.ParseInt(string(s), 10, 32) + if err != nil || int32(v) != int32(42) { + t.Fatalf("autorest: WithInt32 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithInt64_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithInt64(42)) + if err != nil { + t.Fatalf("autorest: WithInt64 failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithInt64 failed with error (%v)", err) + } + + if r.ContentLength != int64(len(fmt.Sprintf("%v", 42))) { + t.Fatalf("autorest: WithInt64 set Content-Length to %v, expected %v", r.ContentLength, int64(len(fmt.Sprintf("%v", 42)))) + } + + v, err := strconv.ParseInt(string(s), 10, 64) + if err != nil || v != int64(42) { + t.Fatalf("autorest: WithInt64 incorrectly encoded the boolean as %v", s) + } +} + +func TestWithString_SetsTheBody(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithString("value")) + if err != nil { + t.Fatalf("autorest: WithString failed with error (%v)", err) + } + + s, err := ioutil.ReadAll(r.Body) + if err 
!= nil { + t.Fatalf("autorest: WithString failed with error (%v)", err) + } + + if r.ContentLength != int64(len("value")) { + t.Fatalf("autorest: WithString set Content-Length to %v, expected %v", r.ContentLength, int64(len("value"))) + } + + if string(s) != "value" { + t.Fatalf("autorest: WithString incorrectly encoded the string as %v", s) + } +} + +func TestWithJSONSetsContentLength(t *testing.T) { + r, err := Prepare(&http.Request{}, + WithJSON(&mocks.T{Name: "Rob Pike", Age: 42})) + if err != nil { + t.Fatalf("autorest: WithJSON failed with error (%v)", err) + } + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: WithJSON failed with error (%v)", err) + } + + if r.ContentLength != int64(len(b)) { + t.Fatalf("autorest:WithJSON set Content-Length to %v, expected %v", r.ContentLength, len(b)) + } +} + +func TestWithHeaderAllocatesHeaders(t *testing.T) { + r, err := Prepare(mocks.NewRequest(), WithHeader("x-foo", "bar")) + if err != nil { + t.Fatalf("autorest: WithHeader failed (%v)", err) + } + if r.Header.Get("x-foo") != "bar" { + t.Fatalf("autorest: WithHeader failed to add header (%s=%s)", "x-foo", r.Header.Get("x-foo")) + } +} + +func TestWithPathCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithPath("a")) + if err == nil { + t.Fatalf("autorest: WithPath failed to catch a nil URL") + } +} + +func TestWithEscapedPathParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithEscapedPathParameters("", map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithEscapedPathParameters failed to catch a nil URL") + } +} + +func TestWithPathParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithPathParameters("", map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithPathParameters failed to catch a nil URL") + } +} + +func TestWithQueryParametersCatchesNilURL(t *testing.T) { + _, err := Prepare(&http.Request{}, WithQueryParameters(map[string]interface{}{"foo": "bar"})) + if err == nil { + t.Fatalf("autorest: WithQueryParameters failed to catch a nil URL") + } +} + +func TestModifyingExistingRequest(t *testing.T) { + r, err := Prepare(mocks.NewRequestForURL("https://bing.com"), WithPath("search"), WithQueryParameters(map[string]interface{}{"q": "golang"})) + if err != nil { + t.Fatalf("autorest: Preparing an existing request returned an error (%v)", err) + } + if r.URL.Host != "bing.com" { + t.Fatalf("autorest: Preparing an existing request failed when setting the host (%s)", r.URL) + } + + if r.URL.Path != "/search" { + t.Fatalf("autorest: Preparing an existing request failed when setting the path (%s)", r.URL.Path) + } + + if r.URL.RawQuery != "q=golang" { + t.Fatalf("autorest: Preparing an existing request failed when setting the query parameters (%s)", r.URL.RawQuery) + } +} + +func TestModifyingRequestWithExistingQueryParameters(t *testing.T) { + r, err := Prepare( + mocks.NewRequestForURL("https://bing.com"), + WithPath("search"), + WithQueryParameters(map[string]interface{}{"q": "golang the best"}), + WithQueryParameters(map[string]interface{}{"pq": "golang+encoded"}), + ) + if err != nil { + t.Fatalf("autorest: Preparing an existing request returned an error (%v)", err) + } + + if r.URL.Host != "bing.com" { + t.Fatalf("autorest: Preparing an existing request failed when setting the host (%s)", r.URL) + } + + if r.URL.Path != "/search" { + t.Fatalf("autorest: Preparing an existing request failed when setting the path (%s)", r.URL.Path) + } + 
+ if r.URL.RawQuery != "pq=golang+encoded&q=golang+the+best" { + t.Fatalf("autorest: Preparing an existing request failed when setting the query parameters (%s)", r.URL.RawQuery) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder.go b/vendor/github.com/Azure/go-autorest/autorest/responder.go new file mode 100644 index 000000000000..a908a0adb708 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder.go @@ -0,0 +1,250 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "net/http" + "strings" +) + +// Responder is the interface that wraps the Respond method. +// +// Respond accepts and reacts to an http.Response. Implementations must ensure to not share or hold +// state since Responders may be shared and re-used. +type Responder interface { + Respond(*http.Response) error +} + +// ResponderFunc is a method that implements the Responder interface. +type ResponderFunc func(*http.Response) error + +// Respond implements the Responder interface on ResponderFunc. +func (rf ResponderFunc) Respond(r *http.Response) error { + return rf(r) +} + +// RespondDecorator takes and possibly decorates, by wrapping, a Responder. Decorators may react to +// the http.Response and pass it along or, first, pass the http.Response along then react. +type RespondDecorator func(Responder) Responder + +// CreateResponder creates, decorates, and returns a Responder. Without decorators, the returned +// Responder returns the passed http.Response unmodified. Responders may or may not be safe to share +// and re-used: It depends on the applied decorators. For example, a standard decorator that closes +// the response body is fine to share whereas a decorator that reads the body into a passed struct +// is not. +// +// To prevent memory leaks, ensure that at least one Responder closes the response body. +func CreateResponder(decorators ...RespondDecorator) Responder { + return DecorateResponder( + Responder(ResponderFunc(func(r *http.Response) error { return nil })), + decorators...) +} + +// DecorateResponder accepts a Responder and a, possibly empty, set of RespondDecorators, which it +// applies to the Responder. Decorators are applied in the order received, but their affect upon the +// request depends on whether they are a pre-decorator (react to the http.Response and then pass it +// along) or a post-decorator (pass the http.Response along and then react). +func DecorateResponder(r Responder, decorators ...RespondDecorator) Responder { + for _, decorate := range decorators { + r = decorate(r) + } + return r +} + +// Respond accepts an http.Response and a, possibly empty, set of RespondDecorators. +// It creates a Responder from the decorators it then applies to the passed http.Response. 
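// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A minimal example of the Responder pipeline described above: validate the status code,
// decode the JSON body, and always close it. The package name, the user type, and the
// function are assumptions made for illustration only.
package autorest_example

import (
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

type user struct {
	Name string `json:"name"`
	Age  int    `json:"age"`
}

// decodeUser applies decorators in the conventional order: validate, decode, close.
func decodeUser(resp *http.Response) (user, error) {
	var u user
	err := autorest.Respond(resp,
		autorest.WithErrorUnlessStatusCode(http.StatusOK),
		autorest.ByUnmarshallingJSON(&u),
		autorest.ByClosing(),
	)
	return u, err
}
// --- end of sketch ---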
+func Respond(r *http.Response, decorators ...RespondDecorator) error { + if r == nil { + return nil + } + return CreateResponder(decorators...).Respond(r) +} + +// ByIgnoring returns a RespondDecorator that ignores the passed http.Response passing it unexamined +// to the next RespondDecorator. +func ByIgnoring() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + return r.Respond(resp) + }) + } +} + +// ByCopying copies the contents of the http.Response Body into the passed bytes.Buffer as +// the Body is read. +func ByCopying(b *bytes.Buffer) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + resp.Body = TeeReadCloser(resp.Body, b) + } + return err + }) + } +} + +// ByDiscardingBody returns a RespondDecorator that first invokes the passed Responder after which +// it copies the remaining bytes (if any) in the response body to ioutil.Discard. Since the passed +// Responder is invoked prior to discarding the response body, the decorator may occur anywhere +// within the set. +func ByDiscardingBody() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && resp != nil && resp.Body != nil { + if _, err := io.Copy(ioutil.Discard, resp.Body); err != nil { + return fmt.Errorf("Error discarding the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosing returns a RespondDecorator that first invokes the passed Responder after which it +// closes the response body. Since the passed Responder is invoked prior to closing the response +// body, the decorator may occur anywhere within the set. +func ByClosing() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByClosingIfError returns a RespondDecorator that first invokes the passed Responder after which +// it closes the response if the passed Responder returns an error and the response body exists. +func ByClosingIfError() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil && resp != nil && resp.Body != nil { + if err := resp.Body.Close(); err != nil { + return fmt.Errorf("Error closing the response body: %v", err) + } + } + return err + }) + } +} + +// ByUnmarshallingJSON returns a RespondDecorator that decodes a JSON document returned in the +// response Body into the value pointed to by v. 
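// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A possible use of ByCopying together with ByUnmarshallingJSON: keep the raw bytes that
// were decoded, e.g. for logging. The names here are assumptions for illustration only.
package autorest_example

import (
	"bytes"
	"net/http"

	"github.com/Azure/go-autorest/autorest"
)

// decodeAndCapture decodes the JSON body into v and returns the raw payload
// that was read while decoding.
func decodeAndCapture(resp *http.Response, v interface{}) (string, error) {
	var raw bytes.Buffer
	err := autorest.Respond(resp,
		autorest.ByCopying(&raw), // tee the body into raw as it is read
		autorest.ByUnmarshallingJSON(v),
		autorest.ByClosing(),
	)
	return raw.String(), err
}
// --- end of sketch ---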
+func ByUnmarshallingJSON(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + // Some responses might include a BOM, remove for successful unmarshalling + b = bytes.TrimPrefix(b, []byte("\xef\xbb\xbf")) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else if len(strings.Trim(string(b), " ")) > 0 { + errInner = json.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling JSON - Error = '%v' JSON = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// ByUnmarshallingXML returns a RespondDecorator that decodes a XML document returned in the +// response Body into the value pointed to by v. +func ByUnmarshallingXML(v interface{}) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + b, errInner := ioutil.ReadAll(resp.Body) + if errInner != nil { + err = fmt.Errorf("Error occurred reading http.Response#Body - Error = '%v'", errInner) + } else { + errInner = xml.Unmarshal(b, v) + if errInner != nil { + err = fmt.Errorf("Error occurred unmarshalling Xml - Error = '%v' Xml = '%s'", errInner, string(b)) + } + } + } + return err + }) + } +} + +// WithErrorUnlessStatusCode returns a RespondDecorator that emits an error unless the response +// StatusCode is among the set passed. On error, response body is fully read into a buffer and +// presented in the returned error, as well as in the response body. +func WithErrorUnlessStatusCode(codes ...int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + derr := NewErrorWithResponse("autorest", "WithErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + if resp.Body != nil { + defer resp.Body.Close() + b, _ := ioutil.ReadAll(resp.Body) + derr.ServiceError = b + resp.Body = ioutil.NopCloser(bytes.NewReader(b)) + } + err = derr + } + return err + }) + } +} + +// WithErrorUnlessOK returns a RespondDecorator that emits an error if the response StatusCode is +// anything other than HTTP 200. +func WithErrorUnlessOK() RespondDecorator { + return WithErrorUnlessStatusCode(http.StatusOK) +} + +// ExtractHeader extracts all values of the specified header from the http.Response. It returns an +// empty string slice if the passed http.Response is nil or the header does not exist. +func ExtractHeader(header string, resp *http.Response) []string { + if resp != nil && resp.Header != nil { + return resp.Header[http.CanonicalHeaderKey(header)] + } + return nil +} + +// ExtractHeaderValue extracts the first value of the specified header from the http.Response. It +// returns an empty string if the passed http.Response is nil or the header does not exist. 
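// --- Illustrative sketch (editorial addition; not part of the vendored go-autorest sources) ---
// A small test, assumed to live in a _test.go file, showing ExtractHeaderValue together
// with the mocks helpers added earlier in this change.
package autorest_example

import (
	"testing"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/mocks"
)

func TestExtractRetryAfter(t *testing.T) {
	resp := mocks.NewResponseWithStatus("202 Accepted", 202)
	mocks.SetRetryHeader(resp, 30*time.Second)

	if got := autorest.ExtractHeaderValue("Retry-After", resp); got != "30" {
		t.Fatalf("expected a Retry-After of \"30\", got %q", got)
	}
	// A nil response is safe and yields the empty string.
	if got := autorest.ExtractHeaderValue("Retry-After", nil); got != "" {
		t.Fatalf("expected an empty value for a nil response, got %q", got)
	}
}
// --- end of sketch ---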
+func ExtractHeaderValue(header string, resp *http.Response) string { + h := ExtractHeader(header, resp) + if len(h) > 0 { + return h[0] + } + return "" +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/responder_test.go b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go new file mode 100644 index 000000000000..4a57b1e3b196 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/responder_test.go @@ -0,0 +1,665 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleWithErrorUnlessOK() { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + + // Respond and leave the response body open (for a subsequent responder to close) + err := Respond(r, + WithErrorUnlessOK(), + ByDiscardingBody(), + ByClosingIfError()) + + if err == nil { + fmt.Printf("%s of %s returned HTTP 200", r.Request.Method, r.Request.URL) + + // Complete handling the response and close the body + Respond(r, + ByDiscardingBody(), + ByClosing()) + } + // Output: GET of https://microsoft.com/a/b/c/ returned HTTP 200 +} + +func ExampleByUnmarshallingJSON() { + c := ` + { + "name" : "Rob Pike", + "age" : 42 + } + ` + + type V struct { + Name string `json:"name"` + Age int `json:"age"` + } + + v := &V{} + + Respond(mocks.NewResponseWithContent(c), + ByUnmarshallingJSON(v), + ByClosing()) + + fmt.Printf("%s is %d years old\n", v.Name, v.Age) + // Output: Rob Pike is 42 years old +} + +func ExampleByUnmarshallingXML() { + c := ` + + Rob Pike + 42 + ` + + type V struct { + Name string `xml:"Name"` + Age int `xml:"Age"` + } + + v := &V{} + + Respond(mocks.NewResponseWithContent(c), + ByUnmarshallingXML(v), + ByClosing()) + + fmt.Printf("%s is %d years old\n", v.Name, v.Age) + // Output: Rob Pike is 42 years old +} + +func TestCreateResponderDoesNotModify(t *testing.T) { + r1 := mocks.NewResponse() + r2 := mocks.NewResponse() + p := CreateResponder() + err := p.Respond(r1) + if err != nil { + t.Fatalf("autorest: CreateResponder failed (%v)", err) + } + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: CreateResponder without decorators modified the response") + } +} + +func TestCreateResponderRunsDecoratorsInOrder(t *testing.T) { + s := "" + + d := func(n int) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err == nil { + s += fmt.Sprintf("%d", n) + } + return err + }) + } + } + + p := CreateResponder(d(1), d(2), d(3)) + err := p.Respond(&http.Response{}) + if err != nil { + t.Fatalf("autorest: Respond failed (%v)", err) + } + + if s != "123" { + t.Fatalf("autorest: CreateResponder invoked decorators in an incorrect order; expected '123', received '%s'", s) + } +} + +func TestByIgnoring(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() 
RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(r2 *http.Response) error { + r1 := mocks.NewResponse() + if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: ByIgnoring modified the HTTP Response -- received %v, expected %v", r2, r1) + } + return nil + }) + } + })(), + ByIgnoring(), + ByClosing()) +} + +func TestByCopying_Copies(t *testing.T) { + r := mocks.NewResponseWithContent(jsonT) + b := &bytes.Buffer{} + + err := Respond(r, + ByCopying(b), + ByUnmarshallingJSON(&mocks.T{}), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByCopying returned an unexpected error -- %v", err) + } + if b.String() != jsonT { + t.Fatalf("autorest: ByCopying failed to copy the bytes read") + } +} + +func TestByCopying_ReturnsNestedErrors(t *testing.T) { + r := mocks.NewResponseWithContent(jsonT) + + r.Body.Close() + err := Respond(r, + ByCopying(&bytes.Buffer{}), + ByUnmarshallingJSON(&mocks.T{}), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByCopying failed to return the expected error") + } +} + +func TestByCopying_AcceptsNilReponse(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByCopying(&bytes.Buffer{})) +} + +func TestByCopying_AcceptsNilBody(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByCopying(&bytes.Buffer{})) +} + +func TestByClosing(t *testing.T) { + r := mocks.NewResponse() + err := Respond(r, ByClosing()) + if err != nil { + t.Fatalf("autorest: ByClosing failed (%v)", err) + } + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosing did not close the response body") + } +} + +func TestByClosingAcceptsNilResponse(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByClosing()) +} + +func TestByClosingAcceptsNilBody(t *testing.T) { + r := mocks.NewResponse() + + Respond(r, + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByClosing()) +} + +func TestByClosingClosesEvenAfterErrors(t *testing.T) { + var e error + + r := mocks.NewResponse() + Respond(r, + withErrorRespondDecorator(&e), + ByClosing()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosing did not close the response body after an error occurred") + } +} + +func TestByClosingClosesReturnsNestedErrors(t *testing.T) { + var e error + + r := mocks.NewResponse() + err := Respond(r, + withErrorRespondDecorator(&e), + ByClosing()) + + if err == nil || !reflect.DeepEqual(e, err) { + t.Fatalf("autorest: ByClosing failed to return a nested error") + } +} + +func TestByClosingIfErrorAcceptsNilResponse(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + 
} + })(), + ByClosingIfError()) +} + +func TestByClosingIfErrorAcceptsNilBody(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByClosingIfError()) +} + +func TestByClosingIfErrorClosesIfAnErrorOccurs(t *testing.T) { + var e error + + r := mocks.NewResponse() + Respond(r, + withErrorRespondDecorator(&e), + ByClosingIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosingIfError did not close the response body after an error occurred") + } +} + +func TestByClosingIfErrorDoesNotClosesIfNoErrorOccurs(t *testing.T) { + r := mocks.NewResponse() + Respond(r, + ByClosingIfError()) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: ByClosingIfError closed the response body even though no error occurred") + } +} + +func TestByDiscardingBody(t *testing.T) { + r := mocks.NewResponse() + err := Respond(r, + ByDiscardingBody()) + if err != nil { + t.Fatalf("autorest: ByDiscardingBody failed (%v)", err) + } + buf, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Fatalf("autorest: Reading result of ByDiscardingBody failed (%v)", err) + } + + if len(buf) != 0 { + t.Logf("autorest: Body was not empty after calling ByDiscardingBody.") + t.Fail() + } +} + +func TestByDiscardingBodyAcceptsNilResponse(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + r.Respond(nil) + return nil + }) + } + })(), + ByDiscardingBody()) +} + +func TestByDiscardingBodyAcceptsNilBody(t *testing.T) { + var e error + + r := mocks.NewResponse() + + Respond(r, + withErrorRespondDecorator(&e), + (func() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + resp.Body.Close() + resp.Body = nil + r.Respond(resp) + return nil + }) + } + })(), + ByDiscardingBody()) +} + +func TestByUnmarshallingJSON(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingJSON failed to properly unmarshal") + } +} + +func TestByUnmarshallingJSON_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to receive / respond to read error") + } +} + +func TestByUnmarshallingJSONIncludesJSONInErrors(t *testing.T) { + v := &mocks.T{} + j := jsonT[0 : len(jsonT)-2] + r := mocks.NewResponseWithContent(j) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), j) { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return JSON in error (%v)", err) + } +} + +func TestByUnmarshallingJSONEmptyInput(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(``) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingJSON failed to return nil in 
case of empty JSON (%v)", err) + } +} + +func TestByUnmarshallingXML(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: ByUnmarshallingXML failed (%v)", err) + } + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: ByUnmarshallingXML failed to properly unmarshal") + } +} + +func TestByUnmarshallingXML_HandlesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(xmlT) + r.Body.(*mocks.Body).Close() + + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: ByUnmarshallingXML failed to receive / respond to read error") + } +} + +func TestByUnmarshallingXMLIncludesXMLInErrors(t *testing.T) { + v := &mocks.T{} + x := xmlT[0 : len(xmlT)-2] + r := mocks.NewResponseWithContent(x) + err := Respond(r, + ByUnmarshallingXML(v), + ByClosing()) + if err == nil || !strings.Contains(err.Error(), x) { + t.Fatalf("autorest: ByUnmarshallingXML failed to return XML in error (%v)", err) + } +} + +func TestRespondAcceptsNullResponse(t *testing.T) { + err := Respond(nil) + if err != nil { + t.Fatalf("autorest: Respond returned an unexpected error when given a null Response (%v)", err) + } +} + +func TestWithErrorUnlessStatusCodeOKResponse(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + ByUnmarshallingJSON(v), + ByClosing()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) failed on okay response. (%v)", err) + } + + if v.Name != "Rob Pike" || v.Age != 42 { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) corrupted the response body of okay response.") + } +} + +func TesWithErrorUnlessStatusCodeErrorResponse(t *testing.T) { + v := &mocks.T{} + e := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK), + ByUnmarshallingJSON(v), + ByClosing()) + + if err == nil { + t.Fatal("autorest: WithErrorUnlessStatusCode(http.StatusOK) did not return error, on a response to a bad request.") + } + + var errorRespBody []byte + if derr, ok := err.(DetailedError); !ok { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) got wrong error type : %T, expected: DetailedError, on a response to a bad request.", err) + } else { + errorRespBody = derr.ServiceError + } + + if errorRespBody == nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) ServiceError not returned in DetailedError on a response to a bad request.") + } + + err = json.Unmarshal(errorRespBody, e) + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK) cannot parse error returned in ServiceError into json. 
%v", err) + } + + expected := &mocks.T{Name: "Rob Pike", Age: 42} + if e != expected { + t.Fatalf("autorest: WithErrorUnlessStatusCode(http.StatusOK wrong value from parsed ServiceError: got=%#v expected=%#v", e, expected) + } +} + +func TestWithErrorUnlessStatusCode(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusBadRequest, http.StatusUnauthorized, http.StatusInternalServerError), + ByClosingIfError()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode returned an error (%v) for an acceptable status code (%s)", err, r.Status) + } +} + +func TestWithErrorUnlessStatusCodeEmitsErrorForUnacceptableStatusCode(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessStatusCode(http.StatusOK, http.StatusUnauthorized, http.StatusInternalServerError), + ByClosingIfError()) + + if err == nil { + t.Fatalf("autorest: WithErrorUnlessStatusCode failed to return an error for an unacceptable status code (%s)", r.Status) + } +} + +func TestWithErrorUnlessOK(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err != nil { + t.Fatalf("autorest: WithErrorUnlessOK returned an error for OK status code (%v)", err) + } +} + +func TestWithErrorUnlessOKEmitsErrorIfNotOK(t *testing.T) { + r := mocks.NewResponse() + r.Request = mocks.NewRequest() + r.Status = "400 BadRequest" + r.StatusCode = http.StatusBadRequest + + err := Respond(r, + WithErrorUnlessOK(), + ByClosingIfError()) + + if err == nil { + t.Fatalf("autorest: WithErrorUnlessOK failed to return an error for a non-OK status code (%v)", err) + } +} + +func TestExtractHeader(t *testing.T) { + r := mocks.NewResponse() + v := []string{"v1", "v2", "v3"} + mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) + + if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeader(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderHandlesMissingHeader(t *testing.T) { + var v []string + r := mocks.NewResponse() + + if !reflect.DeepEqual(ExtractHeader(mocks.TestHeader, r), v) { + t.Fatalf("autorest: ExtractHeader failed to handle a missing header -- expected %v, received %v", + v, ExtractHeader(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValue(t *testing.T) { + r := mocks.NewResponse() + v := "v1" + mocks.SetResponseHeader(r, mocks.TestHeader, v) + + if ExtractHeaderValue(mocks.TestHeader, r) != v { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValueHandlesMissingHeader(t *testing.T) { + r := mocks.NewResponse() + v := "" + + if ExtractHeaderValue(mocks.TestHeader, r) != v { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v, mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} + +func TestExtractHeaderValueRetrievesFirstValue(t *testing.T) { + r := mocks.NewResponse() + v := []string{"v1", "v2", "v3"} + 
mocks.SetResponseHeaderValues(r, mocks.TestHeader, v) + + if ExtractHeaderValue(mocks.TestHeader, r) != v[0] { + t.Fatalf("autorest: ExtractHeader failed to retrieve the expected header -- expected [%s]%v, received [%s]%v", + mocks.TestHeader, v[0], mocks.TestHeader, ExtractHeaderValue(mocks.TestHeader, r)) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go new file mode 100644 index 000000000000..fa11dbed79b1 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest.go @@ -0,0 +1,52 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// NewRetriableRequest returns a wrapper around an HTTP request that support retry logic. +func NewRetriableRequest(req *http.Request) *RetriableRequest { + return &RetriableRequest{req: req} +} + +// Request returns the wrapped HTTP request. +func (rr *RetriableRequest) Request() *http.Request { + return rr.req +} + +func (rr *RetriableRequest) prepareFromByteReader() (err error) { + // fall back to making a copy (only do this once) + b := []byte{} + if rr.req.ContentLength > 0 { + b = make([]byte, rr.req.ContentLength) + _, err = io.ReadFull(rr.req.Body, b) + if err != nil { + return err + } + } else { + b, err = ioutil.ReadAll(rr.req.Body) + if err != nil { + return err + } + } + rr.br = bytes.NewReader(b) + rr.req.Body = ioutil.NopCloser(rr.br) + return err +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go new file mode 100644 index 000000000000..7143cc61b58b --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.7.go @@ -0,0 +1,54 @@ +// +build !go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. 
+func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.br != nil { + _, err = rr.br.Seek(0, 0 /*io.SeekStart*/) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go new file mode 100644 index 000000000000..ae15c6bf9629 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/retriablerequest_1.8.go @@ -0,0 +1,66 @@ +// +build go1.8 + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package autorest + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" +) + +// RetriableRequest provides facilities for retrying an HTTP request. +type RetriableRequest struct { + req *http.Request + rc io.ReadCloser + br *bytes.Reader +} + +// Prepare signals that the request is about to be sent. +func (rr *RetriableRequest) Prepare() (err error) { + // preserve the request body; this is to support retry logic as + // the underlying transport will always close the reqeust body + if rr.req.Body != nil { + if rr.rc != nil { + rr.req.Body = rr.rc + } else if rr.br != nil { + _, err = rr.br.Seek(0, io.SeekStart) + rr.req.Body = ioutil.NopCloser(rr.br) + } + if err != nil { + return err + } + if rr.req.GetBody != nil { + // this will allow us to preserve the body without having to + // make a copy. note we need to do this on each iteration + rr.rc, err = rr.req.GetBody() + if err != nil { + return err + } + } else if rr.br == nil { + // fall back to making a copy (only do this once) + err = rr.prepareFromByteReader() + } + } + return err +} + +func removeRequestBody(req *http.Request) { + req.Body = nil + req.GetBody = nil + req.ContentLength = 0 +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender.go b/vendor/github.com/Azure/go-autorest/autorest/sender.go new file mode 100644 index 000000000000..cacbd815717d --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender.go @@ -0,0 +1,325 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "log" + "math" + "net/http" + "strconv" + "time" +) + +// Sender is the interface that wraps the Do method to send HTTP requests. +// +// The standard http.Client conforms to this interface. +type Sender interface { + Do(*http.Request) (*http.Response, error) +} + +// SenderFunc is a method that implements the Sender interface. +type SenderFunc func(*http.Request) (*http.Response, error) + +// Do implements the Sender interface on SenderFunc. +func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) { + return sf(r) +} + +// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the +// http.Request and pass it along or, first, pass the http.Request along then react to the +// http.Response result. +type SendDecorator func(Sender) Sender + +// CreateSender creates, decorates, and returns, as a Sender, the default http.Client. +func CreateSender(decorators ...SendDecorator) Sender { + return DecorateSender(&http.Client{}, decorators...) +} + +// DecorateSender accepts a Sender and a, possibly empty, set of SendDecorators, which is applies to +// the Sender. Decorators are applied in the order received, but their affect upon the request +// depends on whether they are a pre-decorator (change the http.Request and then pass it along) or a +// post-decorator (pass the http.Request along and react to the results in http.Response). +func DecorateSender(s Sender, decorators ...SendDecorator) Sender { + for _, decorate := range decorators { + s = decorate(s) + } + return s +} + +// Send sends, by means of the default http.Client, the passed http.Request, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// Send is a convenience method and not recommended for production. Advanced users should use +// SendWithSender, passing and sharing their own Sender (e.g., instance of http.Client). +// +// Send will not poll or retry requests. +func Send(r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return SendWithSender(&http.Client{}, r, decorators...) +} + +// SendWithSender sends the passed http.Request, through the provided Sender, returning the +// http.Response and possible error. It also accepts a, possibly empty, set of SendDecorators which +// it will apply the http.Client before invoking the Do method. +// +// SendWithSender will not poll or retry requests. +func SendWithSender(s Sender, r *http.Request, decorators ...SendDecorator) (*http.Response, error) { + return DecorateSender(s, decorators...).Do(r) +} + +// AfterDelay returns a SendDecorator that delays for the passed time.Duration before +// invoking the Sender. The delay may be terminated by closing the optional channel on the +// http.Request. If canceled, no further Senders are invoked. +func AfterDelay(d time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + if !DelayForBackoff(d, 0, r.Context().Done()) { + return nil, fmt.Errorf("autorest: AfterDelay canceled before full delay") + } + return s.Do(r) + }) + } +} + +// AsIs returns a SendDecorator that invokes the passed Sender without modifying the http.Request. 
+func AsIs() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + return s.Do(r) + }) + } +} + +// DoCloseIfError returns a SendDecorator that first invokes the passed Sender after which +// it closes the response if the passed Sender returns an error and the response body exists. +func DoCloseIfError() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + Respond(resp, ByDiscardingBody(), ByClosing()) + } + return resp, err + }) + } +} + +// DoErrorIfStatusCode returns a SendDecorator that emits an error if the response StatusCode is +// among the set passed. Since these are artificial errors, the response body may still require +// closing. +func DoErrorIfStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorIfStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoErrorUnlessStatusCode returns a SendDecorator that emits an error unless the response +// StatusCode is among the set passed. Since these are artificial errors, the response body +// may still require closing. +func DoErrorUnlessStatusCode(codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil && !ResponseHasStatusCode(resp, codes...) { + err = NewErrorWithResponse("autorest", "DoErrorUnlessStatusCode", resp, "%v %v failed with %s", + resp.Request.Method, + resp.Request.URL, + resp.Status) + } + return resp, err + }) + } +} + +// DoPollForStatusCodes returns a SendDecorator that polls if the http.Response contains one of the +// passed status codes. It expects the http.Response to contain a Location header providing the +// URL at which to poll (using GET) and will poll until the time passed is equal to or greater than +// the supplied duration. It will delay between requests for the duration specified in the +// RetryAfter header or, if the header is absent, the passed delay. Polling may be canceled by +// closing the optional channel on the http.Request. +func DoPollForStatusCodes(duration time.Duration, delay time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + resp, err = s.Do(r) + + if err == nil && ResponseHasStatusCode(resp, codes...) { + r, err = NewPollingRequestWithContext(r.Context(), resp) + + for err == nil && ResponseHasStatusCode(resp, codes...) { + Respond(resp, + ByDiscardingBody(), + ByClosing()) + resp, err = SendWithSender(s, r, + AfterDelay(GetRetryAfter(resp, delay))) + } + } + + return resp, err + }) + } +} + +// DoRetryForAttempts returns a SendDecorator that retries a failed request for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. 
+func DoRetryForAttempts(attempts int, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + for attempt := 0; attempt < attempts; attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// DoRetryForStatusCodes returns a SendDecorator that retries for specified statusCodes for up to the specified +// number of attempts, exponentially backing off between requests using the supplied backoff +// time.Duration (which may be zero). Retrying may be canceled by closing the optional channel on +// the http.Request. +func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + // Increment to add the first call (attempts denotes number of retries) + attempts++ + for attempt := 0; attempt < attempts; { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + // if the error isn't temporary don't bother retrying + if err != nil && !IsTemporaryNetworkError(err) { + return nil, err + } + // we want to retry if err is not nil (e.g. transient network failure). note that for failed authentication + // resp and err will both have a value, so in this case we don't want to retry as it will never succeed. + if err == nil && !ResponseHasStatusCode(resp, codes...) || IsTokenRefreshError(err) { + return resp, err + } + delayed := DelayWithRetryAfter(resp, r.Context().Done()) + if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + // don't count a 429 against the number of attempts + // so that we continue to retry until it succeeds + if resp == nil || resp.StatusCode != http.StatusTooManyRequests { + attempt++ + } + } + return resp, err + }) + } +} + +// DelayWithRetryAfter invokes time.After for the duration specified in the "Retry-After" header in +// responses with status code 429 +func DelayWithRetryAfter(resp *http.Response, cancel <-chan struct{}) bool { + if resp == nil { + return false + } + retryAfter, _ := strconv.Atoi(resp.Header.Get("Retry-After")) + if resp.StatusCode == http.StatusTooManyRequests && retryAfter > 0 { + select { + case <-time.After(time.Duration(retryAfter) * time.Second): + return true + case <-cancel: + return false + } + } + return false +} + +// DoRetryForDuration returns a SendDecorator that retries the request until the total time is equal +// to or greater than the specified duration, exponentially backing off between requests using the +// supplied backoff time.Duration (which may be zero). Retrying may be canceled by closing the +// optional channel on the http.Request. 
+func DoRetryForDuration(d time.Duration, backoff time.Duration) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (resp *http.Response, err error) { + rr := NewRetriableRequest(r) + end := time.Now().Add(d) + for attempt := 0; time.Now().Before(end); attempt++ { + err = rr.Prepare() + if err != nil { + return resp, err + } + resp, err = s.Do(rr.Request()) + if err == nil { + return resp, err + } + if !DelayForBackoff(backoff, attempt, r.Context().Done()) { + return nil, r.Context().Err() + } + } + return resp, err + }) + } +} + +// WithLogging returns a SendDecorator that implements simple before and after logging of the +// request. +func WithLogging(logger *log.Logger) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + logger.Printf("Sending %s %s", r.Method, r.URL) + resp, err := s.Do(r) + if err != nil { + logger.Printf("%s %s received error '%v'", r.Method, r.URL, err) + } else { + logger.Printf("%s %s received %s", r.Method, r.URL, resp.Status) + } + return resp, err + }) + } +} + +// DelayForBackoff invokes time.After for the supplied backoff duration raised to the power of +// passed attempt (i.e., an exponential backoff delay). Backoff duration is in seconds and can set +// to zero for no delay. The delay may be canceled by closing the passed channel. If terminated early, +// returns false. +// Note: Passing attempt 1 will result in doubling "backoff" duration. Treat this as a zero-based attempt +// count. +func DelayForBackoff(backoff time.Duration, attempt int, cancel <-chan struct{}) bool { + select { + case <-time.After(time.Duration(backoff.Seconds()*math.Pow(2, float64(attempt))) * time.Second): + return true + case <-cancel: + return false + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/sender_test.go b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go new file mode 100644 index 000000000000..4c2f0e9a891c --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/sender_test.go @@ -0,0 +1,904 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
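// Illustrative sketch only (not part of the vendored go-autorest files above): how the
// sender decorators defined in sender.go compose in practice. The request `req` is assumed
// to come from autorest.Prepare, and the usual net/http, log, os, and time imports are assumed.
//
//   sender := DecorateSender(&http.Client{},
//       WithLogging(log.New(os.Stdout, "autorest: ", 0)),
//       DoCloseIfError(),
//       DoRetryForStatusCodes(3, 2*time.Second, http.StatusTooManyRequests),
//   )
//   resp, err := sender.Do(req)
//
// DecorateSender wraps the base client with each decorator in turn, so the last decorator
// listed (the retry decorator here) ends up outermost and drives the inner chain once per
// attempt; each attempt re-prepares the body via RetriableRequest and, absent a Retry-After
// header, waits backoff*2^attempt between attempts (see DelayForBackoff below).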
+ +import ( + "bytes" + "context" + "fmt" + "log" + "net/http" + "os" + "reflect" + "sync" + "testing" + "time" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +func ExampleSendWithSender() { + r := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(r) + + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 10) + + logger := log.New(os.Stdout, "autorest: ", 0) + na := NullAuthorizer{} + + req, _ := Prepare(&http.Request{}, + AsGet(), + WithBaseURL("https://microsoft.com/a/b/c/"), + na.WithAuthorization()) + + r, _ = SendWithSender(client, req, + WithLogging(logger), + DoErrorIfStatusCode(http.StatusAccepted), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + // Output: + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted + // autorest: Sending GET https://microsoft.com/a/b/c/ + // autorest: GET https://microsoft.com/a/b/c/ received 202 Accepted +} + +func ExampleDoRetryForAttempts() { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + // Retry with backoff -- ensure returned Bodies are closed + r, _ := SendWithSender(client, mocks.NewRequest(), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts", client.Attempts()) + // Output: Retry stopped after 5 attempts +} + +func ExampleDoErrorIfStatusCode() { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 NoContent", http.StatusNoContent), 10) + + // Chain decorators to retry the request, up to five times, if the status code is 204 + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusNoContent), + DoCloseIfError(), + DoRetryForAttempts(5, time.Duration(0))) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + fmt.Printf("Retry stopped after %d attempts with code %s", client.Attempts(), r.Status) + // Output: Retry stopped after 5 attempts with code 204 NoContent +} + +func TestSendWithSenderRunsDecoratorsInOrder(t *testing.T) { + client := mocks.NewSender() + s := "" + + r, err := SendWithSender(client, mocks.NewRequest(), + withMessage(&s, "a"), + withMessage(&s, "b"), + withMessage(&s, "c")) + if err != nil { + t.Fatalf("autorest: SendWithSender returned an error (%v)", err) + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if s != "abc" { + t.Fatalf("autorest: SendWithSender invoke decorators out of order; expected 'abc', received '%s'", s) + } +} + +func TestCreateSender(t *testing.T) { + f := false + + s := CreateSender( + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + s.Do(&http.Request{}) + + if !f { + t.Fatal("autorest: CreateSender failed to apply supplied decorator") + } +} + +func TestSend(t *testing.T) { + f := false + + Send(&http.Request{}, + (func() SendDecorator { + return func(s Sender) Sender { + 
return SenderFunc(func(r *http.Request) (*http.Response, error) { + f = true + return nil, nil + }) + } + })()) + + if !f { + t.Fatal("autorest: Send failed to apply supplied decorator") + } +} + +func TestAfterDelayWaits(t *testing.T) { + client := mocks.NewSender() + + d := 2 * time.Second + + tt := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + s := time.Since(tt) + if s < d { + t.Fatal("autorest: AfterDelay failed to wait for at least the specified duration") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestAfterDelay_Cancels(t *testing.T) { + client := mocks.NewSender() + ctx, cancel := context.WithCancel(context.Background()) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + end := time.Now() + var err error + go func() { + req := mocks.NewRequest() + req = req.WithContext(ctx) + _, err = SendWithSender(client, req, + AfterDelay(delay)) + end = time.Now() + wg.Done() + }() + cancel() + wg.Wait() + time.Sleep(5 * time.Millisecond) + if end.Sub(start) >= delay { + t.Fatal("autorest: AfterDelay elapsed") + } + if err == nil { + t.Fatal("autorest: AfterDelay didn't cancel") + } +} + +func TestAfterDelayDoesNotWaitTooLong(t *testing.T) { + client := mocks.NewSender() + + d := 5 * time.Millisecond + start := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + AfterDelay(d)) + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: AfterDelay waited too long (exceeded 5 times specified duration)") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestAsIs(t *testing.T) { + client := mocks.NewSender() + + r1 := mocks.NewResponse() + client.AppendResponse(r1) + + r2, err := SendWithSender(client, mocks.NewRequest(), + AsIs()) + if err != nil { + t.Fatalf("autorest: AsIs returned an unexpected error (%v)", err) + } else if !reflect.DeepEqual(r1, r2) { + t.Fatalf("autorest: AsIs modified the response -- received %v, expected %v", r2, r1) + } + + Respond(r1, + ByDiscardingBody(), + ByClosing()) + Respond(r2, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoCloseIfError(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + + if r.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected DoCloseIfError to close response body -- it was left open") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoCloseIfErrorAcceptsNilResponse(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + return nil, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoCloseIfErrorAcceptsNilBody(t *testing.T) { + client := mocks.NewSender() + + SendWithSender(client, mocks.NewRequest(), + (func() SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err != nil { + resp.Body.Close() + } + resp.Body = nil + return resp, fmt.Errorf("Faux Error") + }) + } + })(), + DoCloseIfError()) +} + +func TestDoErrorIfStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 
BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: DoErrorIfStatusCode failed to emit an error for passed code") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoErrorIfStatusCodeIgnoresStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorIfStatusCode(http.StatusBadRequest), + DoCloseIfError()) + if err != nil { + t.Fatal("autorest: DoErrorIfStatusCode failed to ignore a status code") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoErrorUnlessStatusCode(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(mocks.NewResponseWithStatus("400 BadRequest", http.StatusBadRequest)) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorUnlessStatusCode(http.StatusAccepted), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: DoErrorUnlessStatusCode failed to emit an error for an unknown status code") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoErrorUnlessStatusCodeIgnoresStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoErrorUnlessStatusCode(http.StatusAccepted), + DoCloseIfError()) + if err != nil { + t.Fatal("autorest: DoErrorUnlessStatusCode emitted an error for a knonwn status code") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForAttemptsStopsAfterSuccess(t *testing.T) { + client := mocks.NewSender() + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(5, time.Duration(0))) + if client.Attempts() != 1 { + t.Fatalf("autorest: DoRetryForAttempts failed to stop after success -- expected attempts %v, actual %v", + 1, client.Attempts()) + } + if err != nil { + t.Fatalf("autorest: DoRetryForAttempts returned an unexpected error (%v)", err) + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForAttemptsStopsAfterAttempts(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), 10) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(5, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 5 { + t.Fatal("autorest: DoRetryForAttempts failed to stop after specified number of attempts") + } +} + +func TestDoRetryForAttemptsReturnsResponse(t *testing.T) { + client := mocks.NewSender() + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForAttempts(1, time.Duration(0))) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if r == nil { + t.Fatal("autorest: DoRetryForAttempts failed to return the underlying response") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForDurationStopsAfterSuccess(t *testing.T) { + client := mocks.NewSender() + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(10*time.Millisecond, time.Duration(0))) + if client.Attempts() != 1 { + t.Fatalf("autorest: DoRetryForDuration failed to stop after success -- expected attempts %v, actual %v", + 1, client.Attempts()) + } 
+ if err != nil { + t.Fatalf("autorest: DoRetryForDuration returned an unexpected error (%v)", err) + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForDurationStopsAfterDuration(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + d := 5 * time.Millisecond + start := time.Now() + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(d, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if time.Since(start) < d { + t.Fatal("autorest: DoRetryForDuration failed stopped too soon") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForDurationStopsWithinReason(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + d := 5 * time.Second + start := time.Now() + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(d, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if time.Since(start) > (5 * d) { + t.Fatal("autorest: DoRetryForDuration failed stopped soon enough (exceeded 5 times specified duration)") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForDurationReturnsResponse(t *testing.T) { + client := mocks.NewSender() + client.SetAndRepeatError(fmt.Errorf("Faux Error"), -1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForDuration(10*time.Millisecond, time.Duration(0)), + DoCloseIfError()) + if err == nil { + t.Fatal("autorest: Mock client failed to emit errors") + } + + if r == nil { + t.Fatal("autorest: DoRetryForDuration failed to return the underlying response") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDelayForBackoff(t *testing.T) { + d := 2 * time.Second + start := time.Now() + DelayForBackoff(d, 0, nil) + if time.Since(start) < d { + t.Fatal("autorest: DelayForBackoff did not delay as long as expected") + } +} + +func TestDelayForBackoff_Cancels(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + DelayForBackoff(delay, 0, cancel) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatal("autorest: DelayForBackoff failed to cancel") + } +} + +func TestDelayForBackoffWithinReason(t *testing.T) { + d := 5 * time.Second + maxCoefficient := 2 + start := time.Now() + DelayForBackoff(d, 0, nil) + if time.Since(start) > (time.Duration(maxCoefficient) * d) { + + t.Fatalf("autorest: DelayForBackoff delayed too long (exceeded %d times the specified duration)", maxCoefficient) + } +} + +func TestDoPollForStatusCodes_IgnoresUnspecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Duration(0), time.Duration(0))) + + if client.Attempts() != 1 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes polled for unspecified status code") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_PollsForSpecifiedStatusCodes(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() != 2 { + t.Fatalf("autorest: 
Sender#DoPollForStatusCodes failed to poll for specified status code") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_CanBeCanceled(t *testing.T) { + cancel := make(chan struct{}) + delay := 5 * time.Second + + r := mocks.NewResponse() + mocks.SetAcceptedHeaders(r) + client := mocks.NewSender() + client.AppendAndRepeatResponse(r, 100) + + var wg sync.WaitGroup + wg.Add(1) + start := time.Now() + go func() { + wg.Done() + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + Respond(r, + ByDiscardingBody(), + ByClosing()) + }() + wg.Wait() + close(cancel) + time.Sleep(5 * time.Millisecond) + if time.Since(start) >= delay { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to cancel") + } +} + +func TestDoPollForStatusCodes_ClosesAllNonreturnedResponseBodiesWhenPolling(t *testing.T) { + resp := newAcceptedResponse() + + client := mocks.NewSender() + client.AppendAndRepeatResponse(resp, 2) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if resp.Body.(*mocks.Body).IsOpen() || resp.Body.(*mocks.Body).CloseAttempts() < 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not close unreturned response bodies") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_LeavesLastResponseBodyOpen(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(newAcceptedResponse()) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if !r.Body.(*mocks.Body).IsOpen() { + t.Fatalf("autorest: Sender#DoPollForStatusCodes did not leave open the body of the last response") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_StopsPollingAfterAnError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if client.Attempts() > 2 { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to stop polling after receiving an error") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoPollForStatusCodes_ReturnsPollingError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(newAcceptedResponse(), 5) + client.SetError(fmt.Errorf("Faux Error")) + client.SetEmitErrorAfter(1) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoPollForStatusCodes(time.Millisecond, time.Millisecond, http.StatusAccepted)) + + if err == nil { + t.Fatalf("autorest: Sender#DoPollForStatusCodes failed to return error from polling") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestWithLogging_Logs(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + + r, _ := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestWithLogging_HandlesMissingResponse(t *testing.T) { + buf := &bytes.Buffer{} + logger := log.New(buf, "autorest: ", 0) + client := mocks.NewSender() + 
client.AppendResponse(nil) + client.SetError(fmt.Errorf("Faux Error")) + + r, err := SendWithSender(client, &http.Request{}, + WithLogging(logger)) + + if r != nil || err == nil { + t.Fatal("autorest: Sender#WithLogging returned a valid response -- expecting nil") + } + if buf.String() == "" { + t.Fatal("autorest: Sender#WithLogging failed to log the request for a nil response") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) +} + +func TestDoRetryForStatusCodesWithSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("408 Request Timeout", http.StatusRequestTimeout), 2) + client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(5, time.Duration(2*time.Second), http.StatusRequestTimeout), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", + r.Status, client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodesWithNoSuccess(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("504 Gateway Timeout", http.StatusGatewayTimeout), 5) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(2, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: failed stop after %v retry attempts; Want: Stop after 2 retry attempts", + client.Attempts()-1) + } +} + +func TestDoRetryForStatusCodes_CodeNotInRetryList(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 1) + + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 1 || r.Status != "204 No Content" { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Retry attempts %v for StatusCode %v; Want: 0 attempts for StatusCode 204", + client.Attempts(), r.Status) + } +} + +func TestDoRetryForStatusCodes_RequestBodyReadError(t *testing.T) { + client := mocks.NewSender() + client.AppendAndRepeatResponse(mocks.NewResponseWithStatus("204 No Content", http.StatusNoContent), 2) + + r, err := SendWithSender(client, mocks.NewRequestWithCloseBody(), + DoRetryForStatusCodes(6, time.Duration(2*time.Second), http.StatusGatewayTimeout), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if err == nil || client.Attempts() != 0 { + t.Fatalf("autorest: Sender#DoRetryForStatusCodes -- Got: Not failed for request body read error; Want: Failed for body read error - %v", err) + } +} + +func newAcceptedResponse() *http.Response { + resp := mocks.NewResponseWithStatus("202 Accepted", http.StatusAccepted) + mocks.SetAcceptedHeaders(resp) + return resp +} + +func TestDelayWithRetryAfterWithSuccess(t *testing.T) { + after, retries := 2, 2 + totalSecs := after * retries + + client := mocks.NewSender() + resp := mocks.NewResponseWithStatus("429 Too many requests", http.StatusTooManyRequests) + mocks.SetResponseHeader(resp, "Retry-After", fmt.Sprintf("%v", after)) + client.AppendAndRepeatResponse(resp, retries) + 
client.AppendResponse(mocks.NewResponseWithStatus("200 OK", http.StatusOK)) + + d := time.Second * time.Duration(totalSecs) + start := time.Now() + r, _ := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(1, time.Duration(time.Second), http.StatusTooManyRequests), + ) + + if time.Since(start) < d { + t.Fatal("autorest: DelayWithRetryAfter failed stopped too soon") + } + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if client.Attempts() != 3 { + t.Fatalf("autorest: Sender#DelayWithRetryAfter -- Got: StatusCode %v in %v attempts; Want: StatusCode 200 OK in 2 attempts -- ", + r.Status, client.Attempts()-1) + } +} + +type temporaryError struct { + message string +} + +func (te temporaryError) Error() string { + return te.message +} + +func (te temporaryError) Timeout() bool { + return true +} + +func (te temporaryError) Temporary() bool { + return true +} + +func TestDoRetryForStatusCodes_NilResponseTemporaryError(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(nil) + client.SetError(temporaryError{message: "faux error"}) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(3, time.Duration(1*time.Second), StatusCodesForRetry...), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if err != nil || client.Attempts() != 2 { + t.Fatalf("autorest: Sender#TestDoRetryForStatusCodes_NilResponseTemporaryError -- Got: non-nil error or wrong number of attempts - %v", err) + } +} + +func TestDoRetryForStatusCodes_NilResponseTemporaryError2(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(nil) + client.SetError(fmt.Errorf("faux error")) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(3, time.Duration(1*time.Second), StatusCodesForRetry...), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if err != nil || client.Attempts() != 2 { + t.Fatalf("autorest: Sender#TestDoRetryForStatusCodes_NilResponseTemporaryError2 -- Got: nil error or wrong number of attempts - %v", err) + } +} + +type fatalError struct { + message string +} + +func (fe fatalError) Error() string { + return fe.message +} + +func (fe fatalError) Timeout() bool { + return false +} + +func (fe fatalError) Temporary() bool { + return false +} + +func TestDoRetryForStatusCodes_NilResponseFatalError(t *testing.T) { + client := mocks.NewSender() + client.AppendResponse(nil) + client.SetError(fatalError{"fatal error"}) + + r, err := SendWithSender(client, mocks.NewRequest(), + DoRetryForStatusCodes(3, time.Duration(1*time.Second), StatusCodesForRetry...), + ) + + Respond(r, + ByDiscardingBody(), + ByClosing()) + + if err == nil || client.Attempts() > 1 { + t.Fatalf("autorest: Sender#TestDoRetryForStatusCodes_NilResponseFatalError -- Got: nil error or wrong number of attempts - %v", err) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go new file mode 100644 index 000000000000..fdda2ce1aa8e --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert.go @@ -0,0 +1,147 @@ +/* +Package to provides helpers to ease working with pointer values of marshalled structures. +*/ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// String returns a string value for the passed string pointer. It returns the empty string if the
+// pointer is nil.
+func String(s *string) string {
+	if s != nil {
+		return *s
+	}
+	return ""
+}
+
+// StringPtr returns a pointer to the passed string.
+func StringPtr(s string) *string {
+	return &s
+}
+
+// StringSlice returns a string slice value for the passed string slice pointer. It returns a nil
+// slice if the pointer is nil.
+func StringSlice(s *[]string) []string {
+	if s != nil {
+		return *s
+	}
+	return nil
+}
+
+// StringSlicePtr returns a pointer to the passed string slice.
+func StringSlicePtr(s []string) *[]string {
+	return &s
+}
+
+// StringMap returns a map of strings built from the map of string pointers. The empty string is
+// used for nil pointers.
+func StringMap(msp map[string]*string) map[string]string {
+	ms := make(map[string]string, len(msp))
+	for k, sp := range msp {
+		if sp != nil {
+			ms[k] = *sp
+		} else {
+			ms[k] = ""
+		}
+	}
+	return ms
+}
+
+// StringMapPtr returns a pointer to a map of string pointers built from the passed map of strings.
+func StringMapPtr(ms map[string]string) *map[string]*string {
+	msp := make(map[string]*string, len(ms))
+	for k, s := range ms {
+		msp[k] = StringPtr(s)
+	}
+	return &msp
+}
+
+// Bool returns a bool value for the passed bool pointer. It returns false if the pointer is nil.
+func Bool(b *bool) bool {
+	if b != nil {
+		return *b
+	}
+	return false
+}
+
+// BoolPtr returns a pointer to the passed bool.
+func BoolPtr(b bool) *bool {
+	return &b
+}
+
+// Int returns an int value for the passed int pointer. It returns 0 if the pointer is nil.
+func Int(i *int) int {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// IntPtr returns a pointer to the passed int.
+func IntPtr(i int) *int {
+	return &i
+}
+
+// Int32 returns an int32 value for the passed int32 pointer. It returns 0 if the pointer is nil.
+func Int32(i *int32) int32 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int32Ptr returns a pointer to the passed int32.
+func Int32Ptr(i int32) *int32 {
+	return &i
+}
+
+// Int64 returns an int64 value for the passed int64 pointer. It returns 0 if the pointer is nil.
+func Int64(i *int64) int64 {
+	if i != nil {
+		return *i
+	}
+	return 0
+}
+
+// Int64Ptr returns a pointer to the passed int64.
+func Int64Ptr(i int64) *int64 {
+	return &i
+}
+
+// Float32 returns a float32 value for the passed float32 pointer. It returns 0.0 if the pointer is nil.
+func Float32(i *float32) float32 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float32Ptr returns a pointer to the passed float32.
+func Float32Ptr(i float32) *float32 {
+	return &i
+}
+
+// Float64 returns a float64 value for the passed float64 pointer. It returns 0.0 if the pointer is nil.
+func Float64(i *float64) float64 {
+	if i != nil {
+		return *i
+	}
+	return 0.0
+}
+
+// Float64Ptr returns a pointer to the passed float64.
+func Float64Ptr(i float64) *float64 { + return &i +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go b/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go new file mode 100644 index 000000000000..f8177a163403 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/to/convert_test.go @@ -0,0 +1,234 @@ +package to + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "reflect" + "testing" +) + +func TestString(t *testing.T) { + v := "" + if String(&v) != v { + t.Fatalf("to: String failed to return the correct string -- expected %v, received %v", + v, String(&v)) + } +} + +func TestStringHandlesNil(t *testing.T) { + if String(nil) != "" { + t.Fatalf("to: String failed to correctly convert nil -- expected %v, received %v", + "", String(nil)) + } +} + +func TestStringPtr(t *testing.T) { + v := "" + if *StringPtr(v) != v { + t.Fatalf("to: StringPtr failed to return the correct string -- expected %v, received %v", + v, *StringPtr(v)) + } +} + +func TestStringSlice(t *testing.T) { + v := []string{} + if out := StringSlice(&v); !reflect.DeepEqual(out, v) { + t.Fatalf("to: StringSlice failed to return the correct slice -- expected %v, received %v", + v, out) + } +} + +func TestStringSliceHandlesNil(t *testing.T) { + if out := StringSlice(nil); out != nil { + t.Fatalf("to: StringSlice failed to correctly convert nil -- expected %v, received %v", + nil, out) + } +} + +func TestStringSlicePtr(t *testing.T) { + v := []string{"a", "b"} + if out := StringSlicePtr(v); !reflect.DeepEqual(*out, v) { + t.Fatalf("to: StringSlicePtr failed to return the correct slice -- expected %v, received %v", + v, *out) + } +} + +func TestStringMap(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": StringPtr("bar"), "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if *msp[k] != v { + t.Fatalf("to: StringMap incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapHandlesNil(t *testing.T) { + msp := map[string]*string{"foo": StringPtr("foo"), "bar": nil, "baz": StringPtr("baz")} + for k, v := range StringMap(msp) { + if msp[k] == nil && v != "" { + t.Fatalf("to: StringMap incorrectly converted a nil entry -- expected [%s]%v, received[%s]%v", + k, v, k, *msp[k]) + } + } +} + +func TestStringMapPtr(t *testing.T) { + ms := map[string]string{"foo": "foo", "bar": "bar", "baz": "baz"} + for k, msp := range *StringMapPtr(ms) { + if ms[k] != *msp { + t.Fatalf("to: StringMapPtr incorrectly converted an entry -- expected [%s]%v, received[%s]%v", + k, ms[k], k, *msp) + } + } +} + +func TestBool(t *testing.T) { + v := false + if Bool(&v) != v { + t.Fatalf("to: Bool failed to return the correct string -- expected %v, received %v", + v, Bool(&v)) + } +} + +func TestBoolHandlesNil(t *testing.T) { + if Bool(nil) != false { + t.Fatalf("to: Bool failed to correctly convert nil -- expected %v, received %v", + false, Bool(nil)) + } +} 
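+
+// The remaining tests follow the same pattern for each supported type: the
+// value helper dereferences a non-nil pointer, falls back to the zero value
+// for nil, and the corresponding Ptr helper round-trips the value.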
+ +func TestBoolPtr(t *testing.T) { + v := false + if *BoolPtr(v) != v { + t.Fatalf("to: BoolPtr failed to return the correct string -- expected %v, received %v", + v, *BoolPtr(v)) + } +} + +func TestInt(t *testing.T) { + v := 0 + if Int(&v) != v { + t.Fatalf("to: Int failed to return the correct string -- expected %v, received %v", + v, Int(&v)) + } +} + +func TestIntHandlesNil(t *testing.T) { + if Int(nil) != 0 { + t.Fatalf("to: Int failed to correctly convert nil -- expected %v, received %v", + 0, Int(nil)) + } +} + +func TestIntPtr(t *testing.T) { + v := 0 + if *IntPtr(v) != v { + t.Fatalf("to: IntPtr failed to return the correct string -- expected %v, received %v", + v, *IntPtr(v)) + } +} + +func TestInt32(t *testing.T) { + v := int32(0) + if Int32(&v) != v { + t.Fatalf("to: Int32 failed to return the correct string -- expected %v, received %v", + v, Int32(&v)) + } +} + +func TestInt32HandlesNil(t *testing.T) { + if Int32(nil) != int32(0) { + t.Fatalf("to: Int32 failed to correctly convert nil -- expected %v, received %v", + 0, Int32(nil)) + } +} + +func TestInt32Ptr(t *testing.T) { + v := int32(0) + if *Int32Ptr(v) != v { + t.Fatalf("to: Int32Ptr failed to return the correct string -- expected %v, received %v", + v, *Int32Ptr(v)) + } +} + +func TestInt64(t *testing.T) { + v := int64(0) + if Int64(&v) != v { + t.Fatalf("to: Int64 failed to return the correct string -- expected %v, received %v", + v, Int64(&v)) + } +} + +func TestInt64HandlesNil(t *testing.T) { + if Int64(nil) != int64(0) { + t.Fatalf("to: Int64 failed to correctly convert nil -- expected %v, received %v", + 0, Int64(nil)) + } +} + +func TestInt64Ptr(t *testing.T) { + v := int64(0) + if *Int64Ptr(v) != v { + t.Fatalf("to: Int64Ptr failed to return the correct string -- expected %v, received %v", + v, *Int64Ptr(v)) + } +} + +func TestFloat32(t *testing.T) { + v := float32(0) + if Float32(&v) != v { + t.Fatalf("to: Float32 failed to return the correct string -- expected %v, received %v", + v, Float32(&v)) + } +} + +func TestFloat32HandlesNil(t *testing.T) { + if Float32(nil) != float32(0) { + t.Fatalf("to: Float32 failed to correctly convert nil -- expected %v, received %v", + 0, Float32(nil)) + } +} + +func TestFloat32Ptr(t *testing.T) { + v := float32(0) + if *Float32Ptr(v) != v { + t.Fatalf("to: Float32Ptr failed to return the correct string -- expected %v, received %v", + v, *Float32Ptr(v)) + } +} + +func TestFloat64(t *testing.T) { + v := float64(0) + if Float64(&v) != v { + t.Fatalf("to: Float64 failed to return the correct string -- expected %v, received %v", + v, Float64(&v)) + } +} + +func TestFloat64HandlesNil(t *testing.T) { + if Float64(nil) != float64(0) { + t.Fatalf("to: Float64 failed to correctly convert nil -- expected %v, received %v", + 0, Float64(nil)) + } +} + +func TestFloat64Ptr(t *testing.T) { + v := float64(0) + if *Float64Ptr(v) != v { + t.Fatalf("to: Float64Ptr failed to return the correct string -- expected %v, received %v", + v, *Float64Ptr(v)) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility.go b/vendor/github.com/Azure/go-autorest/autorest/utility.go new file mode 100644 index 000000000000..bfddd90b5bfa --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility.go @@ -0,0 +1,228 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "fmt" + "io" + "net" + "net/http" + "net/url" + "reflect" + "strings" + + "github.com/Azure/go-autorest/autorest/adal" +) + +// EncodedAs is a series of constants specifying various data encodings +type EncodedAs string + +const ( + // EncodedAsJSON states that data is encoded as JSON + EncodedAsJSON EncodedAs = "JSON" + + // EncodedAsXML states that data is encoded as Xml + EncodedAsXML EncodedAs = "XML" +) + +// Decoder defines the decoding method json.Decoder and xml.Decoder share +type Decoder interface { + Decode(v interface{}) error +} + +// NewDecoder creates a new decoder appropriate to the passed encoding. +// encodedAs specifies the type of encoding and r supplies the io.Reader containing the +// encoded data. +func NewDecoder(encodedAs EncodedAs, r io.Reader) Decoder { + if encodedAs == EncodedAsJSON { + return json.NewDecoder(r) + } else if encodedAs == EncodedAsXML { + return xml.NewDecoder(r) + } + return nil +} + +// CopyAndDecode decodes the data from the passed io.Reader while making a copy. Having a copy +// is especially useful if there is a chance the data will fail to decode. +// encodedAs specifies the expected encoding, r provides the io.Reader to the data, and v +// is the decoding destination. +func CopyAndDecode(encodedAs EncodedAs, r io.Reader, v interface{}) (bytes.Buffer, error) { + b := bytes.Buffer{} + return b, NewDecoder(encodedAs, io.TeeReader(r, &b)).Decode(v) +} + +// TeeReadCloser returns a ReadCloser that writes to w what it reads from rc. +// It utilizes io.TeeReader to copy the data read and has the same behavior when reading. +// Further, when it is closed, it ensures that rc is closed as well. +func TeeReadCloser(rc io.ReadCloser, w io.Writer) io.ReadCloser { + return &teeReadCloser{rc, io.TeeReader(rc, w)} +} + +type teeReadCloser struct { + rc io.ReadCloser + r io.Reader +} + +func (t *teeReadCloser) Read(p []byte) (int, error) { + return t.r.Read(p) +} + +func (t *teeReadCloser) Close() error { + return t.rc.Close() +} + +func containsInt(ints []int, n int) bool { + for _, i := range ints { + if i == n { + return true + } + } + return false +} + +func escapeValueStrings(m map[string]string) map[string]string { + for key, value := range m { + m[key] = url.QueryEscape(value) + } + return m +} + +func ensureValueStrings(mapOfInterface map[string]interface{}) map[string]string { + mapOfStrings := make(map[string]string) + for key, value := range mapOfInterface { + mapOfStrings[key] = ensureValueString(value) + } + return mapOfStrings +} + +func ensureValueString(value interface{}) string { + if value == nil { + return "" + } + switch v := value.(type) { + case string: + return v + case []byte: + return string(v) + default: + return fmt.Sprintf("%v", v) + } +} + +// MapToValues method converts map[string]interface{} to url.Values. 
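+// Slice and array values are flattened: each element is added under the same key, so
+// map[string]interface{}{"a": []string{"x", "y"}} becomes url.Values{"a": {"x", "y"}}.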
+func MapToValues(m map[string]interface{}) url.Values {
+	v := url.Values{}
+	for key, value := range m {
+		x := reflect.ValueOf(value)
+		if x.Kind() == reflect.Array || x.Kind() == reflect.Slice {
+			for i := 0; i < x.Len(); i++ {
+				v.Add(key, ensureValueString(x.Index(i)))
+			}
+		} else {
+			v.Add(key, ensureValueString(value))
+		}
+	}
+	return v
+}
+
+// AsStringSlice method converts interface{} to []string. It expects the parameter
+// passed to be a slice or array of a type whose underlying type is string.
+func AsStringSlice(s interface{}) ([]string, error) {
+	v := reflect.ValueOf(s)
+	if v.Kind() != reflect.Slice && v.Kind() != reflect.Array {
+		return nil, NewError("autorest", "AsStringSlice", "the value's type is not an array.")
+	}
+	stringSlice := make([]string, 0, v.Len())
+
+	for i := 0; i < v.Len(); i++ {
+		stringSlice = append(stringSlice, v.Index(i).String())
+	}
+	return stringSlice, nil
+}
+
+// String method converts interface v to string. If the interface is a list, it
+// joins list elements using the separator. Note that only sep[0] will be used for
+// joining if any separator is specified.
+func String(v interface{}, sep ...string) string {
+	if len(sep) == 0 {
+		return ensureValueString(v)
+	}
+	stringSlice, ok := v.([]string)
+	if ok == false {
+		var err error
+		stringSlice, err = AsStringSlice(v)
+		if err != nil {
+			panic(fmt.Sprintf("autorest: Couldn't convert value to a string %s.", err))
+		}
+	}
+	return ensureValueString(strings.Join(stringSlice, sep[0]))
+}
+
+// Encode method encodes url path and query parameters.
+func Encode(location string, v interface{}, sep ...string) string {
+	s := String(v, sep...)
+	switch strings.ToLower(location) {
+	case "path":
+		return pathEscape(s)
+	case "query":
+		return queryEscape(s)
+	default:
+		return s
+	}
+}
+
+func pathEscape(s string) string {
+	return strings.Replace(url.QueryEscape(s), "+", "%20", -1)
+}
+
+func queryEscape(s string) string {
+	return url.QueryEscape(s)
+}
+
+// ChangeToGet turns the specified http.Request into a GET (it assumes it wasn't).
+// This is mainly useful for long-running operations that use the Azure-AsyncOperation
+// header, so we change the initial PUT into a GET to retrieve the final result.
+func ChangeToGet(req *http.Request) *http.Request {
+	req.Method = "GET"
+	req.Body = nil
+	req.ContentLength = 0
+	req.Header.Del("Content-Length")
+	return req
+}
+
+// IsTokenRefreshError returns true if the specified error implements the TokenRefreshError
+// interface. If err is a DetailedError it will walk the chain of Original errors.
+func IsTokenRefreshError(err error) bool {
+	if _, ok := err.(adal.TokenRefreshError); ok {
+		return true
+	}
+	if de, ok := err.(DetailedError); ok {
+		return IsTokenRefreshError(de.Original)
+	}
+	return false
+}
+
+// IsTemporaryNetworkError returns true if the specified error is a temporary network error or false
+// if it's not. If the error doesn't implement the net.Error interface the return value is true.
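+// Note that error types which expose no Timeout/Temporary information are therefore
+// treated as transient; TestIsTemporaryNetworkErrorTrue exercises that case.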
+func IsTemporaryNetworkError(err error) bool { + if netErr, ok := err.(net.Error); !ok || (ok && netErr.Temporary()) { + return true + } + return false +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utility_test.go b/vendor/github.com/Azure/go-autorest/autorest/utility_test.go new file mode 100644 index 000000000000..1666544398f4 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utility_test.go @@ -0,0 +1,462 @@ +package autorest + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "encoding/json" + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "reflect" + "sort" + "strings" + "testing" + + "github.com/Azure/go-autorest/autorest/mocks" +) + +const ( + jsonT = ` + { + "name":"Rob Pike", + "age":42 + }` + xmlT = ` + + Rob Pike + 42 + ` +) + +func TestNewDecoderCreatesJSONDecoder(t *testing.T) { + d := NewDecoder(EncodedAsJSON, strings.NewReader(jsonT)) + _, ok := d.(*json.Decoder) + if d == nil || !ok { + t.Fatal("autorest: NewDecoder failed to create a JSON decoder when requested") + } +} + +func TestNewDecoderCreatesXMLDecoder(t *testing.T) { + d := NewDecoder(EncodedAsXML, strings.NewReader(xmlT)) + _, ok := d.(*xml.Decoder) + if d == nil || !ok { + t.Fatal("autorest: NewDecoder failed to create an XML decoder when requested") + } +} + +func TestNewDecoderReturnsNilForUnknownEncoding(t *testing.T) { + d := NewDecoder("unknown", strings.NewReader(xmlT)) + if d != nil { + t.Fatal("autorest: NewDecoder created a decoder for an unknown encoding") + } +} + +func TestCopyAndDecodeDecodesJSON(t *testing.T) { + _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) + if err != nil { + t.Fatalf("autorest: CopyAndDecode returned an error with valid JSON - %v", err) + } +} + +func TestCopyAndDecodeDecodesXML(t *testing.T) { + _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT), &mocks.T{}) + if err != nil { + t.Fatalf("autorest: CopyAndDecode returned an error with valid XML - %v", err) + } +} + +func TestCopyAndDecodeReturnsJSONDecodingErrors(t *testing.T) { + _, err := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT[0:len(jsonT)-2]), &mocks.T{}) + if err == nil { + t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid JSON") + } +} + +func TestCopyAndDecodeReturnsXMLDecodingErrors(t *testing.T) { + _, err := CopyAndDecode(EncodedAsXML, strings.NewReader(xmlT[0:len(xmlT)-2]), &mocks.T{}) + if err == nil { + t.Fatalf("autorest: CopyAndDecode failed to return an error with invalid XML") + } +} + +func TestCopyAndDecodeAlwaysReturnsACopy(t *testing.T) { + b, _ := CopyAndDecode(EncodedAsJSON, strings.NewReader(jsonT), &mocks.T{}) + if b.String() != jsonT { + t.Fatalf("autorest: CopyAndDecode failed to return a valid copy of the data - %v", b.String()) + } +} + +func TestTeeReadCloser_Copies(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + b := &bytes.Buffer{} + + r.Body = TeeReadCloser(r.Body, b) + + err 
:= Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) + } + if b.String() != jsonT { + t.Fatalf("autorest: TeeReadCloser failed to copy the bytes read") + } +} + +func TestTeeReadCloser_PassesReadErrors(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + + r.Body.(*mocks.Body).Close() + r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) + + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err == nil { + t.Fatalf("autorest: TeeReadCloser failed to return the expected error") + } +} + +func TestTeeReadCloser_ClosesWrappedReader(t *testing.T) { + v := &mocks.T{} + r := mocks.NewResponseWithContent(jsonT) + + b := r.Body.(*mocks.Body) + r.Body = TeeReadCloser(r.Body, &bytes.Buffer{}) + err := Respond(r, + ByUnmarshallingJSON(v), + ByClosing()) + if err != nil { + t.Fatalf("autorest: TeeReadCloser returned an unexpected error -- %v", err) + } + if b.IsOpen() { + t.Fatalf("autorest: TeeReadCloser failed to close the nested io.ReadCloser") + } +} + +func TestContainsIntFindsValue(t *testing.T) { + ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + v := 5 + if !containsInt(ints, v) { + t.Fatalf("autorest: containsInt failed to find %v in %v", v, ints) + } +} + +func TestContainsIntDoesNotFindValue(t *testing.T) { + ints := []int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9} + v := 42 + if containsInt(ints, v) { + t.Fatalf("autorest: containsInt unexpectedly found %v in %v", v, ints) + } +} + +func TestContainsIntAcceptsEmptyList(t *testing.T) { + ints := make([]int, 10) + if containsInt(ints, 42) { + t.Fatalf("autorest: containsInt failed to handle an empty list") + } +} + +func TestContainsIntAcceptsNilList(t *testing.T) { + var ints []int + if containsInt(ints, 42) { + t.Fatalf("autorest: containsInt failed to handle an nil list") + } +} + +func TestEscapeStrings(t *testing.T) { + m := map[string]string{ + "string": "a long string with = odd characters", + "int": "42", + "nil": "", + } + r := map[string]string{ + "string": "a+long+string+with+%3D+odd+characters", + "int": "42", + "nil": "", + } + v := escapeValueStrings(m) + if !reflect.DeepEqual(v, r) { + t.Fatalf("autorest: ensureValueStrings returned %v\n", v) + } +} + +func TestEnsureStrings(t *testing.T) { + m := map[string]interface{}{ + "string": "string", + "int": 42, + "nil": nil, + "bytes": []byte{255, 254, 253}, + } + r := map[string]string{ + "string": "string", + "int": "42", + "nil": "", + "bytes": string([]byte{255, 254, 253}), + } + v := ensureValueStrings(m) + if !reflect.DeepEqual(v, r) { + t.Fatalf("autorest: ensureValueStrings returned %v\n", v) + } +} + +func ExampleString() { + m := []string{ + "string1", + "string2", + "string3", + } + + fmt.Println(String(m, ",")) + // Output: string1,string2,string3 +} + +func TestStringWithValidString(t *testing.T) { + i := 123 + if got, want := String(i), "123"; got != want { + t.Logf("got: %q\nwant: %q", got, want) + t.Fail() + } +} + +func TestStringWithStringSlice(t *testing.T) { + s := []string{"string1", "string2"} + if got, want := String(s, ","), "string1,string2"; got != want { + t.Logf("got: %q\nwant: %q", got, want) + t.Fail() + } +} + +func TestStringWithEnum(t *testing.T) { + type TestEnumType string + s := TestEnumType("string1") + if got, want := String(s), "string1"; got != want { + t.Logf("got: %q\nwant: %q", got, want) + t.Fail() + } +} + +func TestStringWithEnumSlice(t *testing.T) { + type TestEnumType string + s := []TestEnumType{"string1", "string2"} + 
if got, want := String(s, ","), "string1,string2"; got != want { + t.Logf("got: %q\nwant: %q", got, want) + t.Fail() + } +} + +func ExampleAsStringSlice() { + type TestEnumType string + + a := []TestEnumType{"value1", "value2"} + b, _ := AsStringSlice(a) + for _, c := range b { + fmt.Println(c) + } + // Output: + // value1 + // value2 +} + +func TestEncodeWithValidPath(t *testing.T) { + s := Encode("Path", "Hello Gopher") + if s != "Hello%20Gopher" { + t.Fatalf("autorest: Encode method failed for valid path encoding. Got: %v; Want: %v", s, "Hello%20Gopher") + } +} + +func TestEncodeWithValidQuery(t *testing.T) { + s := Encode("Query", "Hello Gopher") + if s != "Hello+Gopher" { + t.Fatalf("autorest: Encode method failed for valid query encoding. Got: '%v'; Want: 'Hello+Gopher'", s) + } +} + +func TestEncodeWithValidNotPathQuery(t *testing.T) { + s := Encode("Host", "Hello Gopher") + if s != "Hello Gopher" { + t.Fatalf("autorest: Encode method failed for parameter not query or path. Got: '%v'; Want: 'Hello Gopher'", s) + } +} + +func TestMapToValues(t *testing.T) { + m := map[string]interface{}{ + "a": "a", + "b": 2, + } + v := url.Values{} + v.Add("a", "a") + v.Add("b", "2") + if !isEqual(v, MapToValues(m)) { + t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) + } +} + +func TestMapToValuesWithArrayValues(t *testing.T) { + m := map[string]interface{}{ + "a": []string{"a", "b"}, + "b": 2, + "c": []int{3, 4}, + } + v := url.Values{} + v.Add("a", "a") + v.Add("a", "b") + v.Add("b", "2") + v.Add("c", "3") + v.Add("c", "4") + + if !isEqual(v, MapToValues(m)) { + t.Fatalf("autorest: MapToValues method failed to return correct values - expected(%v) got(%v)", v, MapToValues(m)) + } +} + +type someTempError struct{} + +func (s someTempError) Error() string { + return "temporary error" +} +func (s someTempError) Timeout() bool { + return true +} +func (s someTempError) Temporary() bool { + return true +} + +func TestIsTemporaryNetworkErrorTrue(t *testing.T) { + if !IsTemporaryNetworkError(someTempError{}) { + t.Fatal("expected someTempError to be a temporary network error") + } + if !IsTemporaryNetworkError(errors.New("non-temporary network error")) { + t.Fatal("expected random error to be a temporary network error") + } +} + +type someFatalError struct{} + +func (s someFatalError) Error() string { + return "fatal error" +} +func (s someFatalError) Timeout() bool { + return false +} +func (s someFatalError) Temporary() bool { + return false +} + +func TestIsTemporaryNetworkErrorFalse(t *testing.T) { + if IsTemporaryNetworkError(someFatalError{}) { + t.Fatal("expected someFatalError to be a fatal network error") + } +} + +func isEqual(v, u url.Values) bool { + for key, value := range v { + if len(u[key]) == 0 { + return false + } + sort.Strings(value) + sort.Strings(u[key]) + for i := range value { + if value[i] != u[key][i] { + return false + } + } + u.Del(key) + } + if len(u) > 0 { + return false + } + return true +} + +func doEnsureBodyClosed(t *testing.T) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if resp != nil && resp.Body != nil && resp.Body.(*mocks.Body).IsOpen() { + t.Fatal("autorest: Expected Body to be closed -- it was left open") + } + return resp, err + }) + } +} + +type mockAuthorizer struct{} + +func (ma mockAuthorizer) WithAuthorization() PrepareDecorator { + return WithHeader(headerAuthorization, 
mocks.TestAuthorizationHeader) +} + +type mockFailingAuthorizer struct{} + +func (mfa mockFailingAuthorizer) WithAuthorization() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + return r, fmt.Errorf("ERROR: mockFailingAuthorizer returned expected error") + }) + } +} + +type mockInspector struct { + wasInvoked bool +} + +func (mi *mockInspector) WithInspection() PrepareDecorator { + return func(p Preparer) Preparer { + return PreparerFunc(func(r *http.Request) (*http.Request, error) { + mi.wasInvoked = true + return p.Prepare(r) + }) + } +} + +func (mi *mockInspector) ByInspecting() RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + mi.wasInvoked = true + return r.Respond(resp) + }) + } +} + +func withMessage(output *string, msg string) SendDecorator { + return func(s Sender) Sender { + return SenderFunc(func(r *http.Request) (*http.Response, error) { + resp, err := s.Do(r) + if err == nil { + *output += msg + } + return resp, err + }) + } +} + +func withErrorRespondDecorator(e *error) RespondDecorator { + return func(r Responder) Responder { + return ResponderFunc(func(resp *http.Response) error { + err := r.Respond(resp) + if err != nil { + return err + } + *e = fmt.Errorf("autorest: Faux Respond Error") + return *e + }) + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/utils/auth.go b/vendor/github.com/Azure/go-autorest/autorest/utils/auth.go new file mode 100644 index 000000000000..22893809fd74 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/utils/auth.go @@ -0,0 +1,61 @@ +package utils + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "os" + + "github.com/Azure/go-autorest/autorest" + "github.com/Azure/go-autorest/autorest/adal" + "github.com/Azure/go-autorest/autorest/azure" +) + +// GetAuthorizer gets an Azure Service Principal authorizer. +// This func assumes "AZURE_TENANT_ID", "AZURE_CLIENT_ID", +// "AZURE_CLIENT_SECRET" are set as environment variables. +// +// Deprecated: Use ClientCredentialsConfig.Authorizer() from the +// github.com/Azure/go-autorest/autorest/azure/auth package instead. +func GetAuthorizer(env azure.Environment) (*autorest.BearerAuthorizer, error) { + tenantID := GetEnvVarOrExit("AZURE_TENANT_ID") + + oauthConfig, err := adal.NewOAuthConfig(env.ActiveDirectoryEndpoint, tenantID) + if err != nil { + return nil, err + } + + clientID := GetEnvVarOrExit("AZURE_CLIENT_ID") + clientSecret := GetEnvVarOrExit("AZURE_CLIENT_SECRET") + + spToken, err := adal.NewServicePrincipalToken(*oauthConfig, clientID, clientSecret, env.ResourceManagerEndpoint) + if err != nil { + return nil, err + } + + return autorest.NewBearerAuthorizer(spToken), nil +} + +// GetEnvVarOrExit returns the value of specified environment variable or terminates if it's not defined. 
+//
+// Deprecated: This has been superseded by the github.com/Azure/go-autorest/autorest/azure/auth package.
+func GetEnvVarOrExit(varName string) string {
+	value := os.Getenv(varName)
+	if value == "" {
+		fmt.Printf("Missing environment variable %s\n", varName)
+		os.Exit(1)
+	}
+	return value
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/utils/commit.go b/vendor/github.com/Azure/go-autorest/autorest/utils/commit.go
new file mode 100644
index 000000000000..9bc4e3e04064
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/utils/commit.go
@@ -0,0 +1,32 @@
+package utils
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"bytes"
+	"os/exec"
+)
+
+// GetCommit returns git HEAD (short)
+func GetCommit() string {
+	cmd := exec.Command("git", "rev-parse", "HEAD")
+	var out bytes.Buffer
+	cmd.Stdout = &out
+	err := cmd.Run()
+	if err != nil {
+		return ""
+	}
+	return string(out.Bytes()[:7])
+}
diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/error.go b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
new file mode 100644
index 000000000000..fed156dbf6e6
--- /dev/null
+++ b/vendor/github.com/Azure/go-autorest/autorest/validation/error.go
@@ -0,0 +1,48 @@
+package validation
+
+// Copyright 2017 Microsoft Corporation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import (
+	"fmt"
+)
+
+// Error is the type that's returned when the validation of an API's argument constraints fails.
+type Error struct {
+	// PackageType is the package type of the object emitting the error. For types, the value
+	// matches that produced by the '%T' format specifier of the fmt package. For other elements,
+	// such as functions, it is just the package name (e.g., "autorest").
+	PackageType string
+
+	// Method is the name of the method raising the error.
+	Method string
+
+	// Message is the error message.
+	Message string
+}
+
+// Error returns a string containing the details of the validation failure.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s#%s: Invalid input: %s", e.PackageType, e.Method, e.Message)
+}
+
+// NewError creates a new Error object with the specified parameters.
+// message is treated as a format string to which the optional args apply.
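+// The resulting Error renders as "<PackageType>#<Method>: Invalid input: <formatted message>".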
+func NewError(packageType string, method string, message string, args ...interface{}) Error { + return Error{ + PackageType: packageType, + Method: method, + Message: fmt.Sprintf(message, args...), + } +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go new file mode 100644 index 000000000000..ae987f8fae60 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation.go @@ -0,0 +1,408 @@ +/* +Package validation provides methods for validating parameter value using reflection. +*/ +package validation + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "reflect" + "regexp" + "strings" +) + +// Constraint stores constraint name, target field name +// Rule and chain validations. +type Constraint struct { + + // Target field name for validation. + Target string + + // Constraint name e.g. minLength, MaxLength, Pattern, etc. + Name string + + // Rule for constraint e.g. greater than 10, less than 5 etc. + Rule interface{} + + // Chain Validations for struct type + Chain []Constraint +} + +// Validation stores parameter-wise validation. +type Validation struct { + TargetValue interface{} + Constraints []Constraint +} + +// Constraint list +const ( + Empty = "Empty" + Null = "Null" + ReadOnly = "ReadOnly" + Pattern = "Pattern" + MaxLength = "MaxLength" + MinLength = "MinLength" + MaxItems = "MaxItems" + MinItems = "MinItems" + MultipleOf = "MultipleOf" + UniqueItems = "UniqueItems" + InclusiveMaximum = "InclusiveMaximum" + ExclusiveMaximum = "ExclusiveMaximum" + ExclusiveMinimum = "ExclusiveMinimum" + InclusiveMinimum = "InclusiveMinimum" +) + +// Validate method validates constraints on parameter +// passed in validation array. 
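+//
+// A minimal illustrative call (the parameter name is hypothetical):
+//
+//	err := Validate([]Validation{{
+//		TargetValue: accountName,
+//		Constraints: []Constraint{{Target: "accountName", Name: MinLength, Rule: 3, Chain: nil}},
+//	}})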
+func Validate(m []Validation) error { + for _, item := range m { + v := reflect.ValueOf(item.TargetValue) + for _, constraint := range item.Constraints { + var err error + switch v.Kind() { + case reflect.Ptr: + err = validatePtr(v, constraint) + case reflect.String: + err = validateString(v, constraint) + case reflect.Struct: + err = validateStruct(v, constraint) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + err = validateInt(v, constraint) + case reflect.Float32, reflect.Float64: + err = validateFloat(v, constraint) + case reflect.Array, reflect.Slice, reflect.Map: + err = validateArrayMap(v, constraint) + default: + err = createError(v, constraint, fmt.Sprintf("unknown type %v", v.Kind())) + } + + if err != nil { + return err + } + } + } + return nil +} + +func validateStruct(x reflect.Value, v Constraint, name ...string) error { + //Get field name from target name which is in format a.b.c + s := strings.Split(v.Target, ".") + f := x.FieldByName(s[len(s)-1]) + if isZero(f) { + return createError(x, v, fmt.Sprintf("field %q doesn't exist", v.Target)) + } + + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(f), + Constraints: []Constraint{v}, + }, + }) +} + +func validatePtr(x reflect.Value, v Constraint) error { + if v.Name == ReadOnly { + if !x.IsNil() { + return createError(x.Elem(), v, "readonly parameter; must send as nil or empty in request") + } + return nil + } + if x.IsNil() { + return checkNil(x, v) + } + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x.Elem()), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateInt(x reflect.Value, v Constraint) error { + i := x.Int() + r, ok := toInt64(v.Rule) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case MultipleOf: + if i%r != 0 { + return createError(x, v, fmt.Sprintf("value must be a multiple of %v", r)) + } + case ExclusiveMinimum: + if i <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if i >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if i < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if i > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable for type integer", v.Name)) + } + return nil +} + +func validateFloat(x reflect.Value, v Constraint) error { + f := x.Float() + r, ok := v.Rule.(float64) + if !ok { + return createError(x, v, fmt.Sprintf("rule must be float value for %v constraint; got: %v", v.Name, v.Rule)) + } + switch v.Name { + case ExclusiveMinimum: + if f <= r { + return createError(x, v, fmt.Sprintf("value must be greater than %v", r)) + } + case ExclusiveMaximum: + if f >= r { + return createError(x, v, fmt.Sprintf("value must be less than %v", r)) + } + case InclusiveMinimum: + if f < r { + return createError(x, v, fmt.Sprintf("value must be greater than or equal to %v", r)) + } + case InclusiveMaximum: + if f > r { + return createError(x, v, fmt.Sprintf("value must be less than or equal to %v", r)) + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable for type float", v.Name)) + } + return nil +} + +func validateString(x reflect.Value, v 
Constraint) error { + s := x.String() + switch v.Name { + case Empty: + if len(s) == 0 { + return checkEmpty(x, v) + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + if !reg.MatchString(s) { + return createError(x, v, fmt.Sprintf("value doesn't match pattern %v", v.Rule)) + } + case MaxLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be less than or equal to %v", v.Rule)) + } + case MinLength: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer value for %v constraint; got: %v", v.Name, v.Rule)) + } + if len(s) < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("value length must be greater than or equal to %v", v.Rule)) + } + case ReadOnly: + if len(s) > 0 { + return createError(reflect.ValueOf(s), v, "readonly parameter; must send as nil or empty in request") + } + default: + return createError(x, v, fmt.Sprintf("constraint %s is not applicable to string type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func validateArrayMap(x reflect.Value, v Constraint) error { + switch v.Name { + case Null: + if x.IsNil() { + return checkNil(x, v) + } + case Empty: + if x.IsNil() || x.Len() == 0 { + return checkEmpty(x, v) + } + case MaxItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() > v.Rule.(int) { + return createError(x, v, fmt.Sprintf("maximum item limit is %v; got: %v", v.Rule, x.Len())) + } + case MinItems: + if _, ok := v.Rule.(int); !ok { + return createError(x, v, fmt.Sprintf("rule must be integer for %v constraint; got: %v", v.Name, v.Rule)) + } + if x.Len() < v.Rule.(int) { + return createError(x, v, fmt.Sprintf("minimum item limit is %v; got: %v", v.Rule, x.Len())) + } + case UniqueItems: + if x.Kind() == reflect.Array || x.Kind() == reflect.Slice { + if !checkForUniqueInArray(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else if x.Kind() == reflect.Map { + if !checkForUniqueInMap(x) { + return createError(x, v, fmt.Sprintf("all items in parameter %q must be unique; got:%v", v.Target, x)) + } + } else { + return createError(x, v, fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", v.Name, x.Kind())) + } + case ReadOnly: + if x.Len() != 0 { + return createError(x, v, "readonly parameter; must send as nil or empty in request") + } + case Pattern: + reg, err := regexp.Compile(v.Rule.(string)) + if err != nil { + return createError(x, v, err.Error()) + } + keys := x.MapKeys() + for _, k := range keys { + if !reg.MatchString(k.String()) { + return createError(k, v, fmt.Sprintf("map key doesn't match pattern %v", v.Rule)) + } + } + default: + return createError(x, v, fmt.Sprintf("constraint %v is not applicable to array, slice and map type", v.Name)) + } + + if v.Chain != nil { + return Validate([]Validation{ + { + TargetValue: getInterfaceValue(x), + Constraints: v.Chain, + }, + }) + } + return nil +} + +func checkNil(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool 
value for %v constraint; got: %v", v.Name, v.Rule)) + } + if v.Rule.(bool) { + return createError(x, v, "value can not be null; required parameter") + } + return nil +} + +func checkEmpty(x reflect.Value, v Constraint) error { + if _, ok := v.Rule.(bool); !ok { + return createError(x, v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + } + + if v.Rule.(bool) { + return createError(x, v, "value can not be null or empty; required parameter") + } + return nil +} + +func checkForUniqueInArray(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + arrOfInterface := make([]interface{}, x.Len()) + + for i := 0; i < x.Len(); i++ { + arrOfInterface[i] = x.Index(i).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range arrOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func checkForUniqueInMap(x reflect.Value) bool { + if x == reflect.Zero(reflect.TypeOf(x)) || x.Len() == 0 { + return false + } + mapOfInterface := make(map[interface{}]interface{}, x.Len()) + + keys := x.MapKeys() + for _, k := range keys { + mapOfInterface[k.Interface()] = x.MapIndex(k).Interface() + } + + m := make(map[interface{}]bool) + for _, val := range mapOfInterface { + if m[val] { + return false + } + m[val] = true + } + return true +} + +func getInterfaceValue(x reflect.Value) interface{} { + if x.Kind() == reflect.Invalid { + return nil + } + return x.Interface() +} + +func isZero(x interface{}) bool { + return x == reflect.Zero(reflect.TypeOf(x)).Interface() +} + +func createError(x reflect.Value, v Constraint, err string) error { + return fmt.Errorf("autorest/validation: validation failed: parameter=%s constraint=%s value=%#v details: %s", + v.Target, v.Name, getInterfaceValue(x), err) +} + +func toInt64(v interface{}) (int64, bool) { + if i64, ok := v.(int64); ok { + return i64, true + } + // older generators emit max constants as int, so if int64 fails fall back to int + if i32, ok := v.(int); ok { + return int64(i32), true + } + return 0, false +} + +// NewErrorWithValidationError appends package type and method name in +// validation error. +// +// Deprecated: Please use validation.NewError() instead. +func NewErrorWithValidationError(err error, packageType, method string) error { + return NewError(packageType, method, err.Error()) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go b/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go new file mode 100644 index 000000000000..e452e3af290f --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/validation/validation_test.go @@ -0,0 +1,2450 @@ +package validation + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +import ( + "fmt" + "reflect" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestCheckForUniqueInArrayTrue(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{1, 2, 3})), true) +} + +func TestCheckForUniqueInArrayFalse(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{1, 2, 3, 3})), false) +} + +func TestCheckForUniqueInArrayEmpty(t *testing.T) { + require.Equal(t, checkForUniqueInArray(reflect.ValueOf([]int{})), false) +} + +func TestCheckForUniqueInMapTrue(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[string]int{"one": 1, "two": 2})), true) +} + +func TestCheckForUniqueInMapFalse(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[int]string{1: "one", 2: "one"})), false) +} + +func TestCheckForUniqueInMapEmpty(t *testing.T) { + require.Equal(t, checkForUniqueInMap(reflect.ValueOf(map[int]string{})), false) +} + +func TestCheckEmpty_WithValueEmptyRuleTrue(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null or empty; required parameter") + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckEmpty_WithEmptyStringRuleFalse(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, checkEmpty(reflect.ValueOf(x), v)) +} + +func TestCheckEmpty_IncorrectRule(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: 10, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", v.Name, v.Rule)) + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckEmpty_WithErrorArray(t *testing.T) { + var x interface{} = []string{} + v := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null or empty; required parameter") + require.Equal(t, checkEmpty(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckNil_WithNilValueRuleTrue(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "x", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"x", MaxItems, 4, nil}, + }, + } + expected := createError(reflect.ValueOf(x), v, "value can not be null; required parameter") + require.Equal(t, checkNil(reflect.ValueOf(x), v).Error(), expected.Error()) +} + +func TestCheckNil_WithNilValueRuleFalse(t *testing.T) { + var x interface{} + v := Constraint{ + Target: "x", + Name: Null, + Rule: false, + Chain: []Constraint{ + {"x", MaxItems, 4, nil}, + }, + } + require.Nil(t, checkNil(reflect.ValueOf(x), v)) +} + +func TestCheckNil_IncorrectRule(t *testing.T) { + var x interface{} + c := Constraint{ + Target: "str", + Name: Null, + Rule: 10, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be bool value for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, checkNil(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_WithNilValueRuleTrue(t *testing.T) { + var a []string + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: Null, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, "value can not be null; required parameter") + 
require.Equal(t, validateArrayMap(reflect.ValueOf(x), c), expected) +} + +func TestValidateArrayMap_WithNilValueRuleFalse(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Null, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithValueRuleNullTrue(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: Null, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithEmptyValueRuleTrue(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: true, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, "value can not be null or empty; required parameter") + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c), expected) +} + +func TestValidateArrayMap_WithEmptyValueRuleFalse(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_WithEmptyRuleEmptyTrue(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MaxItemsIncorrectRule(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: false, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MaxItemsNoError(t *testing.T) { + var x interface{} = []string{"1", "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MaxItemsWithError(t *testing.T) { + var x interface{} = []string{"1", "2", "3"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("maximum item limit is %v; got: 3", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MaxItemsWithEmpty(t *testing.T) { + var x interface{} = []string{} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_MinItemsIncorrectRule(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: false, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MinItemsNoError1(t *testing.T) { + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf([]int{1, 2}), c)) +} + +func TestValidateArrayMap_MinItemsNoError2(t *testing.T) { + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf([]int{1, 2, 3}), c)) +} 
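+
+// The MinItems failure cases below cover a slice with too few items and an
+// empty slice; both produce the "minimum item limit" error.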
+ +func TestValidateArrayMap_MinItemsWithError(t *testing.T) { + var x interface{} = []int{1} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: 1", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_MinItemsWithEmpty(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: 0", c.Rule)) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_Map_MaxItemsIncorrectRule(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: false, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateArrayMap_Map_MaxItemsNoError(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MaxItemsWithError(t *testing.T) { + a := map[int]string{1: "1", 2: "2", 3: "3"} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", c.Rule, len(a))), true) +} + +func TestValidateArrayMap_Map_MaxItemsWithEmpty(t *testing.T) { + a := map[int]string{} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MaxItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MinItemsIncorrectRule(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: false, + Chain: nil, + } + require.Equal(t, strings.Contains(validateArrayMap(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateArrayMap_Map_MinItemsNoError1(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2"} + require.Nil(t, validateArrayMap(reflect.ValueOf(x), + Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + })) +} + +func TestValidateArrayMap_Map_MinItemsNoError2(t *testing.T) { + var x interface{} = map[int]string{1: "1", 2: "2", 3: "3"} + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_MinItemsWithError(t *testing.T) { + a := map[int]string{1: "1"} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + } + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: %v", c.Rule, len(a))) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +func TestValidateArrayMap_Map_MinItemsWithEmpty(t *testing.T) { + a := map[int]string{} + var x interface{} = a + c := Constraint{ + Target: "arr", + Name: MinItems, + Rule: 2, + Chain: nil, + 
} + expected := createError(reflect.ValueOf(x), c, fmt.Sprintf("minimum item limit is %v; got: %v", c.Rule, len(a))) + require.Equal(t, validateArrayMap(reflect.ValueOf(x), c).Error(), expected.Error()) +} + +// func TestValidateArrayMap_Map_MinItemsNil(t *testing.T) { +// var a map[int]float64 +// var x interface{} = a +// c := Constraint{ +// Target: "str", +// Name: MinItems, +// Rule: true, +// Chain: nil, +// } +// expected := createError(reflect.Value(x), c, fmt.Sprintf("all items in parameter %v must be unique; got:%v", c.Target, x)) +// if z := validateArrayMap(reflect.ValueOf(x), c); strings.Contains(z.Error(), "all items in parameter str must be unique;") { +// t.Fatalf("autorest/validation: valiateArrayMap failed to return error \nexpect: %v;\ngot: %v", expected, z) +// } +// } + +func TestValidateArrayMap_Map_UniqueItemsTrue(t *testing.T) { + var x interface{} = map[float64]int{1.2: 1, 1.4: 2} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Map_UniqueItemsFalse(t *testing.T) { + var x interface{} = map[string]string{"1": "1", "2": "2", "3": "1"} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique", c.Target)), true) +} + +func TestValidateArrayMap_Map_UniqueItemsEmpty(t *testing.T) { + // Consider Empty map as not unique returns false + var x interface{} = map[int]float64{} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique", c.Target)), true) +} + +func TestValidateArrayMap_Map_UniqueItemsNil(t *testing.T) { + var a map[int]float64 + var x interface{} = a + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsTrue(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_Array_UniqueItemsFalse(t *testing.T) { + var x interface{} = []string{"1", "2", "1"} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsEmpty(t *testing.T) { + // Consider Empty array as not unique returns false + var x interface{} = []float64{} + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsNil(t *testing.T) { + // Consider nil array as not unique returns false + var a []float64 + var x interface{} = a + c := Constraint{ + Target: "str", + Name: 
UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, x)), true) +} + +func TestValidateArrayMap_Array_UniqueItemsInvalidType(t *testing.T) { + var x interface{} = "hello" + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("type must be array, slice or map for constraint %v; got: %v", c.Name, reflect.ValueOf(x).Kind())), true) +} + +func TestValidateArrayMap_Array_UniqueItemsInvalidConstraint(t *testing.T) { + var x interface{} = "hello" + c := Constraint{ + Target: "str", + Name: "sdad", + Rule: true, + Chain: nil, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("constraint %v is not applicable to array, slice and map type", c.Name)), true) +} + +func TestValidateArrayMap_ValidateChainConstraint1(t *testing.T) { + a := []int{1, 2, 3, 4} + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", (c.Chain)[0].Rule, len(a))), true) +} + +func TestValidateArrayMap_ValidateChainConstraint2(t *testing.T) { + a := []int{1, 2, 3, 4} + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("maximum item limit is %v; got: %v", (c.Chain)[0].Rule, len(a))), true) +} + +func TestValidateArrayMap_ValidateChainConstraint3(t *testing.T) { + var a []string + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("value can not be null; required parameter")), true) +} + +func TestValidateArrayMap_ValidateChainConstraint4(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("value can not be null or empty; required parameter")), true) +} + +func TestValidateArrayMap_ValidateChainConstraintNilNotRequired(t *testing.T) { + var a []int + var x interface{} = a + c := Constraint{ + Target: "str", + Name: Null, + Rule: false, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_ValidateChainConstraintEmptyNotRequired(t *testing.T) { + var x interface{} = map[string]int{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateArrayMap_ReadOnlyWithError(t *testing.T) { + var x interface{} = []int{1, 2} + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: []Constraint{ + {"str", MaxItems, 3, nil}, + }, + } + z := 
validateArrayMap(reflect.ValueOf(x), c) + require.Equal(t, strings.Contains(z.Error(), + fmt.Sprintf("readonly parameter; must send as nil or empty in request")), true) +} + +func TestValidateArrayMap_ReadOnlyWithoutError(t *testing.T) { + var x interface{} = []int{} + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Nil(t, validateArrayMap(reflect.ValueOf(x), c)) +} + +func TestValidateString_ReadOnly(t *testing.T) { + var x interface{} = "Hello Gopher" + c := Constraint{ + Target: "str", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("readonly parameter; must send as nil or empty in request")), true) +} + +func TestValidateString_EmptyTrue(t *testing.T) { + // Empty true means parameter is required but Empty returns error + c := Constraint{ + Target: "str", + Name: Empty, + Rule: true, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(""), c).Error(), + fmt.Sprintf("value can not be null or empty; required parameter")), true) +} + +func TestValidateString_EmptyFalse(t *testing.T) { + // Empty false means parameter is not required and Empty return nil + var x interface{} + c := Constraint{ + Target: "str", + Name: Empty, + Rule: false, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf(x), c)) +} + +func TestValidateString_MaxLengthInvalid(t *testing.T) { + // Empty true means parameter is required but Empty returns error + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: 4, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value length must be less than or equal to %v", c.Rule)), true) +} + +func TestValidateString_MaxLengthValid(t *testing.T) { + // Empty false means parameter is not required and Empty return nil + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: 7, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("Hello"), c)) +} + +func TestValidateString_MaxLengthRuleInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MaxLength, + Rule: true, // must be int for maxLength + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateString_MinLengthInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: 10, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value length must be greater than or equal to %v", c.Rule)), true) +} + +func TestValidateString_MinLengthValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("Hello"), c)) +} + +func TestValidateString_MinLengthRuleInvalid(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: "str", + Name: MinLength, + Rule: true, // must be int for minLength + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateString_PatternInvalidPattern(t *testing.T) { + var x interface{} = "Hello" + c := Constraint{ + Target: 
"str", + Name: Pattern, + Rule: `^[[:alnum:$`, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + "error parsing regexp: missing closing ]"), true) +} + +func TestValidateString_PatternMatch1(t *testing.T) { + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^http://\w+$`, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("http://masd"), c)) +} + +func TestValidateString_PatternMatch2(t *testing.T) { + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[a-zA-Z0-9]+$`, + Chain: nil, + } + require.Nil(t, validateString(reflect.ValueOf("asdadad2323sad"), c)) +} + +func TestValidateString_PatternNotMatch(t *testing.T) { + var x interface{} = "asdad@@ad2323sad" + c := Constraint{ + Target: "str", + Name: Pattern, + Rule: `^[a-zA-Z0-9]+$`, + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value doesn't match pattern %v", c.Rule)), true) +} + +func TestValidateString_InvalidConstraint(t *testing.T) { + var x interface{} = "asdad@@ad2323sad" + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: "^[a-zA-Z0-9]+$", + Chain: nil, + } + require.Equal(t, strings.Contains(validateString(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %s is not applicable to string type", c.Name)), true) +} + +func TestValidateFloat_InvalidConstraint(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: 3.0, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %v is not applicable for type float", c.Name)), true) +} + +func TestValidateFloat_InvalidRuleValue(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be float value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateFloat_ExclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_ExclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1.4 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.5, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than %v", c.Rule)), true) +} + +func TestValidateFloat_ExclusiveMinimumConstraintBoundary(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1.42, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than %v", c.Rule)), true) +} + +func TestValidateFloat_exclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 2.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_exclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1.2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than %v", c.Rule)), true) +} + 
+func TestValidateFloat_exclusiveMaximumConstraintBoundary(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1.42, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than %v", c.Rule)), true) +} + +func TestValidateFloat_inclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 2.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_inclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1.2, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be less than or equal to %v", c.Rule)), true) + +} + +func TestValidateFloat_inclusiveMaximumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: 1.42, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_InclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.0, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateFloat_InclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1.42 + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.5, + Chain: nil, + } + require.Equal(t, strings.Contains(validateFloat(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("value must be greater than or equal to %v", c.Rule)), true) + +} + +func TestValidateFloat_InclusiveMinimumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1.42, + Chain: nil, + } + require.Nil(t, validateFloat(reflect.ValueOf(1.42), c)) +} + +func TestValidateInt_InvalidConstraint(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: UniqueItems, + Rule: 3, + Chain: nil, + } + require.Equal(t, strings.Contains(validateInt(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("constraint %s is not applicable for type integer", c.Name)), true) +} + +func TestValidateInt_InvalidRuleValue(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3.4, + Chain: nil, + } + require.Equal(t, strings.Contains(validateInt(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("rule must be integer value for %v constraint; got: %v", c.Name, c.Rule)), true) +} + +func TestValidateInt_ExclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(3), c)) +} + +func TestValidateInt_ExclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than %v", c.Rule)).Error()) +} + +func TestValidateInt_ExclusiveMinimumConstraintBoundary(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be 
greater than %v", c.Rule)).Error()) +} + +func TestValidateInt_exclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 2, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_exclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 2 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than %v", c.Rule)).Error()) +} + +func TestValidateInt_exclusiveMaximumConstraintBoundary(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: ExclusiveMaximum, + Rule: 1, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than %v", c.Rule)).Error()) +} + +func TestValidateInt_inclusiveMaximumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: int64(2), + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_inclusiveMaximumConstraintInvalid(t *testing.T) { + var x interface{} = 2 + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: int64(1), + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be less than or equal to %v", c.Rule)).Error()) +} + +func TestValidateInt_inclusiveMaximumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMaximum, + Rule: int64(1), + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_InclusiveMinimumConstraintValid(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_InclusiveMinimumConstraintInvalid(t *testing.T) { + var x interface{} = 1 + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 2, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, fmt.Sprintf("value must be greater than or equal to %v", c.Rule)).Error()) +} + +func TestValidateInt_InclusiveMinimumConstraintBoundary(t *testing.T) { + c := Constraint{ + Target: "str", + Name: InclusiveMinimum, + Rule: 1, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(1), c)) +} + +func TestValidateInt_MultipleOfWithoutError(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MultipleOf, + Rule: 10, + Chain: nil, + } + require.Nil(t, validateInt(reflect.ValueOf(2300), c)) +} + +func TestValidateInt_MultipleOfWithError(t *testing.T) { + c := Constraint{ + Target: "str", + Name: MultipleOf, + Rule: 11, + Chain: nil, + } + require.Equal(t, validateInt(reflect.ValueOf(2300), c).Error(), + createError(reflect.ValueOf(2300), c, fmt.Sprintf("value must be a multiple of %v", c.Rule)).Error()) +} + +func TestValidatePointer_NilTrue(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, // Required property + Chain: nil, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, "value can not be null; required parameter").Error()) +} + +func TestValidatePointer_NilFalse(t *testing.T) { + var z *int + var x 
interface{} = z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: false, // not required property + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_NilReadonlyValid(t *testing.T) { + var z *int + var x interface{} = z + c := Constraint{ + Target: "ptr", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_NilReadonlyInvalid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: ReadOnly, + Rule: true, + Chain: nil, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(z), c, "readonly parameter; must send as nil or empty in request").Error()) +} + +func TestValidatePointer_IntValid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3, + Chain: nil, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_IntInvalid(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 11, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], "value must be greater than or equal to 11").Error()) +} + +func TestValidatePointer_IntInvalidConstraint(t *testing.T) { + z := 10 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type integer", MaxItems)).Error()) +} + +func TestValidatePointer_ValidInt64(t *testing.T) { + z := int64(10) + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3, + Chain: nil, + }, + }} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_InvalidConstraintInt64(t *testing.T) { + z := int64(10) + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3, + Chain: nil, + }, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type integer", MaxItems)).Error()) +} + +func TestValidatePointer_ValidFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 3.0, + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_InvalidFloat(t *testing.T) { + z := 10.1 + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: InclusiveMinimum, + Rule: 12.0, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10.1), c.Chain[0], + "value must be greater than or equal to 12").Error()) +} + +func TestValidatePointer_InvalidConstraintFloat(t *testing.T) { + z := 10.1 + var x interface{} = 
&z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxItems, + Rule: 3.0, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(10.1), c.Chain[0], + fmt.Sprintf("constraint %v is not applicable for type float", MaxItems)).Error()) +} + +func TestValidatePointer_StringValid(t *testing.T) { + z := "hello" + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: Pattern, + Rule: "^[a-z]+$", + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidatePointer_StringInvalid(t *testing.T) { + z := "hello" + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: MaxLength, + Rule: 2, + Chain: nil, + }}} + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("hello"), c.Chain[0], + "value length must be less than or equal to 2").Error()) +} + +func TestValidatePointer_ArrayValid(t *testing.T) { + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: UniqueItems, + Rule: "true", + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(&[]string{"1", "2"}), c)) +} + +func TestValidatePointer_ArrayInvalid(t *testing.T) { + z := []string{"1", "2", "2"} + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{{ + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(z), c.Chain[0], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", c.Target, z)).Error()) +} + +func TestValidatePointer_MapValid(t *testing.T) { + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{ + { + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}} + require.Nil(t, validatePtr(reflect.ValueOf(&map[interface{}]string{1: "1", "1": "2"}), c)) +} + +func TestValidatePointer_MapInvalid(t *testing.T) { + z := map[interface{}]string{1: "1", "1": "2", 1.3: "2"} + var x interface{} = &z + c := Constraint{ + Target: "ptr", + Name: Null, + Rule: true, + Chain: []Constraint{{ + Target: "ptr", + Name: UniqueItems, + Rule: true, + Chain: nil, + }}, + } + require.Equal(t, strings.Contains(validatePtr(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("all items in parameter %q must be unique;", c.Target)), true) +} + +type Child struct { + I string +} +type Product struct { + C *Child + Str *string + Name string + Arr *[]string + M *map[string]string + Num *int32 +} + +type Sample struct { + M *map[string]*string + Name string +} + +func TestValidatePointer_StructWithError(t *testing.T) { + s := "hello" + var x interface{} = &Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "p", Null, "True", + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", MaxLength, 2, nil}, + }}, + {"Str", MaxLength, 2, nil}, + {"Name", MaxLength, 5, nil}, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("100"), c.Chain[0].Chain[0], + "value length must be less than or equal to 2").Error()) +} + +func TestValidatePointer_WithNilStruct(t *testing.T) { + var p *Product + var x interface{} = p + c := Constraint{ + "p", Null, 
true, + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 5, nil}, + }}, + }}, + {"Str", MaxLength, 2, nil}, + {"Name", MaxLength, 5, nil}, + }, + } + require.Equal(t, validatePtr(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x), c, + fmt.Sprintf("value can not be null; required parameter")).Error()) +} + +func TestValidatePointer_StructWithNoError(t *testing.T) { + s := "hello" + var x interface{} = &Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "p", Null, true, + []Constraint{ + {"C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 5, nil}, + }}, + }}, + }, + } + require.Nil(t, validatePtr(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_FieldNotExist(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "C", Null, true, + []Constraint{ + {"Name", Empty, true, nil}, + }, + } + s = "Name" + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(Child{"100"}), c.Chain[0], + fmt.Sprintf("field %q doesn't exist", s)).Error()) +} + +func TestValidateStruct_WithChainConstraint(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{"100"}, + Str: &s, + Name: "Gopher", + } + c := Constraint{ + "C", Null, true, + []Constraint{ + {"I", Empty, true, + []Constraint{ + {"I", MaxLength, 2, nil}, + }}, + }, + } + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf("100"), c.Chain[0].Chain[0], "value length must be less than or equal to 2").Error()) +} + +func TestValidateStruct_WithoutChainConstraint(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{""}, + Str: &s, + Name: "Gopher", + } + c := Constraint{"C", Null, true, + []Constraint{ + {"I", Empty, true, nil}, // throw error for Empty + }} + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(""), c.Chain[0], "value can not be null or empty; required parameter").Error()) +} + +func TestValidateStruct_WithArrayNull(t *testing.T) { + s := "hello" + var x interface{} = Product{ + C: &Child{""}, + Str: &s, + Name: "Gopher", + Arr: nil, + } + c := Constraint{"Arr", Null, true, + []Constraint{ + {"Arr", MaxItems, 4, nil}, + {"Arr", MinItems, 2, nil}, + }, + } + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(x.(Product).Arr), c, "value can not be null; required parameter").Error()) +} + +func TestValidateStruct_WithArrayEmptyError(t *testing.T) { + // arr := []string{} + var x interface{} = Product{ + Arr: &[]string{}, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", MinItems, 2, nil}, + }} + + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(*(x.(Product).Arr)), c.Chain[0], + fmt.Sprintf("value can not be null or empty; required parameter")).Error()) +} + +func TestValidateStruct_WithArrayEmptyWithoutError(t *testing.T) { + var x interface{} = Product{ + Arr: &[]string{}, + } + c := Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, false, nil}, + {"Arr", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_ArrayWithError(t *testing.T) { + arr := []string{"1", "1"} + var x interface{} = Product{ + Arr: &arr, + } + c := 
Constraint{ + "Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + } + s := "Arr" + require.Equal(t, validateStruct(reflect.ValueOf(x), c).Error(), + createError(reflect.ValueOf(*(x.(Product).Arr)), c.Chain[2], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", s, *(x.(Product).Arr))).Error()) +} + +func TestValidateStruct_MapWithError(t *testing.T) { + m := map[string]string{ + "a": "hello", + "b": "hello", + } + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, true, + []Constraint{ + {"M", Empty, true, nil}, + {"M", MaxItems, 4, nil}, + {"M", UniqueItems, true, nil}, + }, + } + + s := "M" + require.Equal(t, strings.Contains(validateStruct(reflect.ValueOf(x), c).Error(), + fmt.Sprintf("all items in parameter %q must be unique;", s)), true) +} + +func TestValidateStruct_MapWithNoError(t *testing.T) { + m := map[string]string{} + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, true, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidateStruct_MapNilNoError(t *testing.T) { + var m map[string]string + var x interface{} = Product{ + M: &m, + } + c := Constraint{ + "M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MaxItems, 4, nil}, + }, + } + require.Nil(t, validateStruct(reflect.ValueOf(x), c)) +} + +func TestValidate_MapValidationWithError(t *testing.T) { + var x1 interface{} = &Product{ + Arr: &[]string{"1", "2"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = &Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"x1", Null, true, + []Constraint{ + {"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + }, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }}}, + {x2, + []Constraint{ + {"x2", Null, true, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 2, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + + z := Validate(v).Error() + require.Equal(t, strings.Contains(z, "minimum item limit is 2; got: 1"), true) + require.Equal(t, strings.Contains(z, "MinItems"), true) +} + +func TestValidate_MapValidationWithoutError(t *testing.T) { + var x1 interface{} = &Product{ + Arr: &[]string{"1", "2"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = &Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"x1", Null, true, + []Constraint{ + {"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }, + }, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + {"M", Pattern, "^[a-z]+$", nil}, + }, + }, + }, + }}}, + {x2, + []Constraint{ + {"x2", Null, true, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + {"M", Pattern, "^[a-z]+$", nil}, + }, + }, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_UnknownType(t *testing.T) { + var c chan int + 
v := []Validation{ + {c, + []Constraint{{"c", Null, true, nil}}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(c), v[0].Constraints[0], + fmt.Sprintf("unknown type %v", reflect.ValueOf(c).Kind())).Error()) +} + +func TestValidate_example1(t *testing.T) { + var x1 interface{} = Product{ + Arr: &[]string{"1", "1"}, + M: &map[string]string{"a": "hello"}, + } + s := "hello" + var x2 interface{} = Sample{ + M: &map[string]*string{"a": &s}, + } + v := []Validation{ + {x1, + []Constraint{{"Arr", Null, true, + []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MaxItems, 4, nil}, + {"Arr", UniqueItems, true, nil}, + }}, + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + }}, + {x2, + []Constraint{ + {"M", Null, false, + []Constraint{ + {"M", Empty, false, nil}, + {"M", MinItems, 1, nil}, + {"M", UniqueItems, true, nil}, + }, + }, + {"Name", Empty, true, nil}, + }}, + } + s = "Arr" + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{"1", "1"}), v[0].Constraints[0].Chain[2], + fmt.Sprintf("all items in parameter %q must be unique; got:%v", s, []string{"1", "1"})).Error()) +} + +func TestValidate_Int(t *testing.T) { + n := int32(100) + v := []Validation{ + {n, + []Constraint{ + {"n", MultipleOf, 10, nil}, + {"n", ExclusiveMinimum, 100, nil}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[1], + "value must be greater than 100").Error()) +} + +func TestValidate_IntPointer(t *testing.T) { + n := int32(100) + p := &n + v := []Validation{ + {p, + []Constraint{ + {"p", Null, true, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[0].Chain[0], + "value must be greater than 100").Error()) + + // required paramter + p = nil + v = []Validation{ + {p, + []Constraint{ + {"p", Null, true, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(v[0].TargetValue), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // Not required + p = nil + v = []Validation{ + {p, + []Constraint{ + {"p", Null, false, []Constraint{ + {"p", ExclusiveMinimum, 100, nil}, + }}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_IntStruct(t *testing.T) { + n := int32(100) + p := &Product{ + Num: &n, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Num", Null, true, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(n), v[0].Constraints[0].Chain[0].Chain[0], + "value must be greater than 100").Error()) + + // required paramter + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Num", Null, true, []Constraint{ + {"p.Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Num), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Num", Null, false, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, 
[]Constraint{{"p", Null, false, + []Constraint{ + {"Num", Null, false, []Constraint{ + {"Num", ExclusiveMinimum, 100, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_String(t *testing.T) { + s := "hello" + v := []Validation{ + {s, + []Constraint{ + {"s", Empty, true, nil}, + {"s", Empty, true, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[1].Chain[0], + "value length must be less than or equal to 3").Error()) + + // required paramter + s = "" + v = []Validation{ + {s, + []Constraint{ + {"s", Empty, true, nil}, + {"s", Empty, true, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[1], + "value can not be null or empty; required parameter").Error()) + + // not required paramter + s = "" + v = []Validation{ + {s, + []Constraint{ + {"s", Empty, false, nil}, + {"s", Empty, false, + []Constraint{{"s", MaxLength, 3, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_StringStruct(t *testing.T) { + s := "hello" + p := &Product{ + Str: &s, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Str", Null, true, []Constraint{ + {"p.Str", Empty, true, nil}, + {"p.Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + // e := ValidationError{ + // Constraint: MaxLength, + // Target: "Str", + // TargetValue: s, + // Details: fmt.Sprintf("value length must be less than 3", s), + // } + // if z := Validate(v); !reflect.DeepEqual(e, z) { + // t.Fatalf("autorest/validation: Validate failed to return error \nexpect: %v\ngot: %v", e, z) + // } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[1], + "value length must be less than or equal to 3").Error()) + + // required paramter - can't be Empty + s = "" + p = &Product{ + Str: &s, + } + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Str", Null, true, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Str", Null, true, []Constraint{ + {"p.Str", Empty, true, nil}, + {"p.Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Str), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"Str", Null, false, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Str", Null, true, []Constraint{ + {"Str", Empty, true, nil}, + {"Str", MaxLength, 3, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_Array(t *testing.T) { + s := []string{"hello"} + v := []Validation{ + {s, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + 
createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // Empty array + v = []Validation{ + {[]string{}, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // null array + var s1 []string + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, true, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s1), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // not required paramter + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, false, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_ArrayPointer(t *testing.T) { + s := []string{"hello"} + v := []Validation{ + {&s, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}, + }}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // Empty array + v = []Validation{ + {&[]string{}, + []Constraint{ + {"s", Null, true, + []Constraint{ + {"s", Empty, true, nil}, + {"s", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // null array + var s1 *[]string + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, true, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s1), v[0].Constraints[0], + "value can not be null; required parameter").Error()) + + // not required paramter + v = []Validation{ + {s1, + []Constraint{ + {"s1", Null, false, + []Constraint{ + {"s1", Empty, true, nil}, + {"s1", MinItems, 2, nil}}}, + }, + }, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_ArrayInStruct(t *testing.T) { + s := []string{"hello"} + p := &Product{ + Arr: &s, + } + + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(s), v[0].Constraints[0].Chain[0].Chain[1], + fmt.Sprintf("minimum item limit is 2; got: %v", len(s))).Error()) + + // required paramter - can't be Empty + p = &Product{ + Arr: &[]string{}, + } + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf([]string{}), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{ + {"p.Arr", Null, true, []Constraint{ + {"p.Arr", Empty, true, nil}, + {"p.Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } 
+ require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.Arr), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + v = []Validation{ + {&Product{}, []Constraint{{"p", Null, true, + []Constraint{ + {"Arr", Null, false, []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{ + {"Arr", Null, true, []Constraint{ + {"Arr", Empty, true, nil}, + {"Arr", MinItems, 2, nil}, + }}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestValidate_StructInStruct(t *testing.T) { + p := &Product{ + C: &Child{I: "hello"}, + } + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", MinLength, 7, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C.I), v[0].Constraints[0].Chain[0].Chain[0], + "value length must be greater than or equal to 7").Error()) + + // required paramter - can't be Empty + p = &Product{ + C: &Child{I: ""}, + } + + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", Empty, true, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C.I), v[0].Constraints[0].Chain[0].Chain[0], + "value can not be null or empty; required parameter").Error()) + + // required paramter - can't be null + p = &Product{} + v = []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"C", Null, true, + []Constraint{{"I", Empty, true, nil}}}, + }, + }}}, + } + require.Equal(t, Validate(v).Error(), + createError(reflect.ValueOf(p.C), v[0].Constraints[0].Chain[0], + "value can not be null; required parameter").Error()) + + // Not required + v = []Validation{ + {&Product{}, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, false, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + require.Nil(t, Validate(v)) + + // Parent not required + p = nil + v = []Validation{ + {p, []Constraint{{"p", Null, false, + []Constraint{{"p.C", Null, false, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + require.Nil(t, Validate(v)) +} + +func TestNewError(t *testing.T) { + p := &Product{} + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, true, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + err := createError(reflect.ValueOf(p.C), v[0].Constraints[0].Chain[0], "value can not be null; required parameter") + z := fmt.Sprintf("batch.AccountClient#Create: Invalid input: %s", + err.Error()) + require.Equal(t, NewError("batch.AccountClient", "Create", err.Error()).Error(), z) +} + +func TestNewErrorWithValidationError(t *testing.T) { + p := &Product{} + v := []Validation{ + {p, []Constraint{{"p", Null, true, + []Constraint{{"p.C", Null, true, + []Constraint{{"p.C.I", Empty, true, nil}}}, + }, + }}}, + } + err := createError(reflect.ValueOf(p.C), v[0].Constraints[0].Chain[0], "value can not be null; required parameter") + z := fmt.Sprintf("batch.AccountClient#Create: Invalid input: %s", + err.Error()) + valError := NewErrorWithValidationError(err, "batch.AccountClient", "Create") + require.IsType(t, valError, Error{}) + require.Equal(t, valError.Error(), z) +} diff --git a/vendor/github.com/Azure/go-autorest/autorest/version.go b/vendor/github.com/Azure/go-autorest/autorest/version.go new file mode 
100644 index 000000000000..3c6451546bae --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/autorest/version.go @@ -0,0 +1,22 @@ +package autorest + +import "github.com/Azure/go-autorest/version" + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Version returns the semantic version (see http://semver.org). +func Version() string { + return version.Number +} diff --git a/vendor/github.com/Azure/go-autorest/logger/logger.go b/vendor/github.com/Azure/go-autorest/logger/logger.go new file mode 100644 index 000000000000..756fd80cab16 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger.go @@ -0,0 +1,328 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" +) + +// LevelType tells a logger the minimum level to log. When code reports a log entry, +// the LogLevel indicates the level of the log entry. The logger only records entries +// whose level is at least the level it was told to log. See the Log* constants. +// For example, if a logger is configured with LogError, then LogError, LogPanic, +// and LogFatal entries will be logged; lower level entries are ignored. +type LevelType uint32 + +const ( + // LogNone tells a logger not to log any entries passed to it. + LogNone LevelType = iota + + // LogFatal tells a logger to log all LogFatal entries passed to it. + LogFatal + + // LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it. + LogPanic + + // LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it. + LogError + + // LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogWarning + + // LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogInfo + + // LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it. + LogDebug +) + +const ( + logNone = "NONE" + logFatal = "FATAL" + logPanic = "PANIC" + logError = "ERROR" + logWarning = "WARNING" + logInfo = "INFO" + logDebug = "DEBUG" + logUnknown = "UNKNOWN" +) + +// ParseLevel converts the specified string into the corresponding LevelType. 
+func ParseLevel(s string) (lt LevelType, err error) { + switch strings.ToUpper(s) { + case logFatal: + lt = LogFatal + case logPanic: + lt = LogPanic + case logError: + lt = LogError + case logWarning: + lt = LogWarning + case logInfo: + lt = LogInfo + case logDebug: + lt = LogDebug + default: + err = fmt.Errorf("bad log level '%s'", s) + } + return +} + +// String implements the stringer interface for LevelType. +func (lt LevelType) String() string { + switch lt { + case LogNone: + return logNone + case LogFatal: + return logFatal + case LogPanic: + return logPanic + case LogError: + return logError + case LogWarning: + return logWarning + case LogInfo: + return logInfo + case LogDebug: + return logDebug + default: + return logUnknown + } +} + +// Filter defines functions for filtering HTTP request/response content. +type Filter struct { + // URL returns a potentially modified string representation of a request URL. + URL func(u *url.URL) string + + // Header returns a potentially modified set of values for the specified key. + // To completely exclude the header key/values return false. + Header func(key string, val []string) (bool, []string) + + // Body returns a potentially modified request/response body. + Body func(b []byte) []byte +} + +func (f Filter) processURL(u *url.URL) string { + if f.URL == nil { + return u.String() + } + return f.URL(u) +} + +func (f Filter) processHeader(k string, val []string) (bool, []string) { + if f.Header == nil { + return true, val + } + return f.Header(k, val) +} + +func (f Filter) processBody(b []byte) []byte { + if f.Body == nil { + return b + } + return f.Body(b) +} + +// Writer defines methods for writing to a logging facility. +type Writer interface { + // Writeln writes the specified message with the standard log entry header and new-line character. + Writeln(level LevelType, message string) + + // Writef writes the specified format specifier with the standard log entry header and no new-line character. + Writef(level LevelType, format string, a ...interface{}) + + // WriteRequest writes the specified HTTP request to the logger if the log level is greater than + // or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no request content is excluded. + WriteRequest(req *http.Request, filter Filter) + + // WriteResponse writes the specified HTTP response to the logger if the log level is greater than + // or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher. + // Custom filters can be specified to exclude URL, header, and/or body content from the log. + // By default no respone content is excluded. + WriteResponse(resp *http.Response, filter Filter) +} + +// Instance is the default log writer initialized during package init. +// This can be replaced with a custom implementation as required. +var Instance Writer + +// default log level +var logLevel = LogNone + +// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL. +// If no value was specified the default value is LogNone. +// Custom loggers can call this to retrieve the configured log level. 
+func Level() LevelType { + return logLevel +} + +func init() { + // separated for testing purposes + initDefaultLogger() +} + +func initDefaultLogger() { + // init with nilLogger so callers don't have to do a nil check on Default + Instance = nilLogger{} + llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL")) + if llStr == "" { + return + } + var err error + logLevel, err = ParseLevel(llStr) + if err != nil { + fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error()) + return + } + if logLevel == LogNone { + return + } + // default to stderr + dest := os.Stderr + lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE") + if strings.EqualFold(lfStr, "stdout") { + dest = os.Stdout + } else if lfStr != "" { + lf, err := os.Create(lfStr) + if err == nil { + dest = lf + } else { + fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error()) + } + } + Instance = fileLogger{ + logLevel: logLevel, + mu: &sync.Mutex{}, + logFile: dest, + } +} + +// the nil logger does nothing +type nilLogger struct{} + +func (nilLogger) Writeln(LevelType, string) {} + +func (nilLogger) Writef(LevelType, string, ...interface{}) {} + +func (nilLogger) WriteRequest(*http.Request, Filter) {} + +func (nilLogger) WriteResponse(*http.Response, Filter) {} + +// A File is used instead of a Logger so the stream can be flushed after every write. +type fileLogger struct { + logLevel LevelType + mu *sync.Mutex // for synchronizing writes to logFile + logFile *os.File +} + +func (fl fileLogger) Writeln(level LevelType, message string) { + fl.Writef(level, "%s\n", message) +} + +func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) { + if fl.logLevel >= level { + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...)) + fl.logFile.Sync() + } +} + +func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) { + if req == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL)) + // dump headers + for k, v := range req.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(req.Header, req.Body) { + // dump body + body, err := ioutil.ReadAll(req.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + if nc, ok := req.Body.(io.Seeker); ok { + // rewind to the beginning + nc.Seek(0, io.SeekStart) + } else { + // recreate the body + req.Body = ioutil.NopCloser(bytes.NewReader(body)) + } + } else { + fmt.Fprintf(b, "failed to read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) { + if resp == nil || fl.logLevel < LogInfo { + return + } + b := &bytes.Buffer{} + fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL)) + // dump headers + for k, v := range resp.Header { + if ok, mv := filter.processHeader(k, v); ok { + fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ",")) + } + } + if fl.shouldLogBody(resp.Header, resp.Body) { + // dump body + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err == nil { + fmt.Fprintln(b, string(filter.processBody(body))) + resp.Body = ioutil.NopCloser(bytes.NewReader(body)) + } else { + fmt.Fprintf(b, "failed to 
read body: %v\n", err) + } + } + fl.mu.Lock() + defer fl.mu.Unlock() + fmt.Fprint(fl.logFile, b.String()) + fl.logFile.Sync() +} + +// returns true if the provided body should be included in the log +func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool { + ct := header.Get("Content-Type") + return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1 +} + +// creates standard header for log entries, it contains a timestamp and the log level +func entryHeader(level LevelType) string { + // this format provides a fixed number of digits so the size of the timestamp is constant + return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String()) +} diff --git a/vendor/github.com/Azure/go-autorest/logger/logger_test.go b/vendor/github.com/Azure/go-autorest/logger/logger_test.go new file mode 100644 index 000000000000..5838b2524218 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/logger/logger_test.go @@ -0,0 +1,185 @@ +package logger + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "io/ioutil" + "net/http" + "os" + "path" + "regexp" + "strings" + "testing" +) + +func TestNilLogger(t *testing.T) { + // verify no crash with no logging + Instance.WriteRequest(nil, Filter{}) +} + +const ( + reqURL = "https://fakething/dot/com" + reqHeaderKey = "x-header" + reqHeaderVal = "value" + reqBody = "the request body" + respHeaderKey = "response-header" + respHeaderVal = "something" + respBody = "the response body" + logFileTimeStampRegex = `\(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{7}(-\d{2}:\d{2}|Z)\)` +) + +func TestLogReqRespNoBody(t *testing.T) { + err := os.Setenv("AZURE_GO_SDK_LOG_LEVEL", "info") + if err != nil { + t.Fatalf("failed to set log level: %v", err) + } + lf := path.Join(os.TempDir(), "testloggingbasic.log") + err = os.Setenv("AZURE_GO_SDK_LOG_FILE", lf) + if err != nil { + t.Fatalf("failed to set log file: %v", err) + } + initDefaultLogger() + if Level() != LogInfo { + t.Fatalf("wrong log level: %d", Level()) + } + // create mock request and response for logging + req, err := http.NewRequest(http.MethodGet, reqURL, nil) + if err != nil { + t.Fatalf("failed to create mock request: %v", err) + } + req.Header.Add(reqHeaderKey, reqHeaderVal) + Instance.WriteRequest(req, Filter{}) + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: req, + Header: http.Header{}, + } + resp.Header.Add(respHeaderKey, respHeaderVal) + Instance.WriteResponse(resp, Filter{}) + if fl, ok := Instance.(fileLogger); ok { + fl.logFile.Close() + } else { + t.Fatal("expected Instance to be fileLogger") + } + // parse log file to ensure contents match + b, err := ioutil.ReadFile(lf) + if err != nil { + t.Fatalf("failed to read log file: %v", err) + } + parts := strings.Split(string(b), "\n") + reqMatch := fmt.Sprintf("%s INFO: REQUEST: %s %s", logFileTimeStampRegex, 
req.Method, req.URL.String()) + respMatch := fmt.Sprintf("%s INFO: RESPONSE: %d %s", logFileTimeStampRegex, resp.StatusCode, resp.Request.URL.String()) + if !matchRegex(t, reqMatch, parts[0]) { + t.Fatalf("request header doesn't match: %s", parts[0]) + } + if !matchRegex(t, fmt.Sprintf("(?i)%s: %s", reqHeaderKey, reqHeaderVal), parts[1]) { + t.Fatalf("request header entry doesn't match: %s", parts[1]) + } + if !matchRegex(t, respMatch, parts[2]) { + t.Fatalf("response header doesn't match: %s", parts[2]) + } + if !matchRegex(t, fmt.Sprintf("(?i)%s: %s", respHeaderKey, respHeaderVal), parts[3]) { + t.Fatalf("response header value doesn't match: %s", parts[3]) + } + // disable logging + err = os.Setenv("AZURE_GO_SDK_LOG_LEVEL", "") + if err != nil { + t.Fatalf("failed to clear log level: %v", err) + } +} + +func TestLogReqRespWithBody(t *testing.T) { + err := os.Setenv("AZURE_GO_SDK_LOG_LEVEL", "debug") + if err != nil { + t.Fatalf("failed to set log level: %v", err) + } + lf := path.Join(os.TempDir(), "testloggingfull.log") + err = os.Setenv("AZURE_GO_SDK_LOG_FILE", lf) + if err != nil { + t.Fatalf("failed to set log file: %v", err) + } + initDefaultLogger() + if Level() != LogDebug { + t.Fatalf("wrong log level: %d", Level()) + } + // create mock request and response for logging + req, err := http.NewRequest(http.MethodGet, reqURL, strings.NewReader(reqBody)) + if err != nil { + t.Fatalf("failed to create mock request: %v", err) + } + req.Header.Add(reqHeaderKey, reqHeaderVal) + Instance.WriteRequest(req, Filter{}) + resp := &http.Response{ + Status: "200 OK", + StatusCode: 200, + Proto: "HTTP/1.0", + ProtoMajor: 1, + ProtoMinor: 0, + Request: req, + Header: http.Header{}, + Body: ioutil.NopCloser(strings.NewReader(respBody)), + } + resp.Header.Add(respHeaderKey, respHeaderVal) + Instance.WriteResponse(resp, Filter{}) + if fl, ok := Instance.(fileLogger); ok { + fl.logFile.Close() + } else { + t.Fatal("expected Instance to be fileLogger") + } + // parse log file to ensure contents match + b, err := ioutil.ReadFile(lf) + if err != nil { + t.Fatalf("failed to read log file: %v", err) + } + parts := strings.Split(string(b), "\n") + reqMatch := fmt.Sprintf("%s INFO: REQUEST: %s %s", logFileTimeStampRegex, req.Method, req.URL.String()) + respMatch := fmt.Sprintf("%s INFO: RESPONSE: %d %s", logFileTimeStampRegex, resp.StatusCode, resp.Request.URL.String()) + if !matchRegex(t, reqMatch, parts[0]) { + t.Fatalf("request header doesn't match: %s", parts[0]) + } + if !matchRegex(t, fmt.Sprintf("(?i)%s: %s", reqHeaderKey, reqHeaderVal), parts[1]) { + t.Fatalf("request header value doesn't match: %s", parts[1]) + } + if !matchRegex(t, reqBody, parts[2]) { + t.Fatalf("request body doesn't match: %s", parts[2]) + } + if !matchRegex(t, respMatch, parts[3]) { + t.Fatalf("response header doesn't match: %s", parts[3]) + } + if !matchRegex(t, fmt.Sprintf("(?i)%s: %s", respHeaderKey, respHeaderVal), parts[4]) { + t.Fatalf("response header value doesn't match: %s", parts[4]) + } + if !matchRegex(t, respBody, parts[5]) { + t.Fatalf("response body doesn't match: %s", parts[5]) + } + // disable logging + err = os.Setenv("AZURE_GO_SDK_LOG_LEVEL", "") + if err != nil { + t.Fatalf("failed to clear log level: %v", err) + } +} + +func matchRegex(t *testing.T, pattern, s string) bool { + match, err := regexp.MatchString(pattern, s) + if err != nil { + t.Fatalf("regexp failure: %v", err) + } + return match +} diff --git a/vendor/github.com/Azure/go-autorest/testdata/credsutf16be.json 
b/vendor/github.com/Azure/go-autorest/testdata/credsutf16be.json new file mode 100644 index 000000000000..7fa689d55600 Binary files /dev/null and b/vendor/github.com/Azure/go-autorest/testdata/credsutf16be.json differ diff --git a/vendor/github.com/Azure/go-autorest/testdata/credsutf16le.json b/vendor/github.com/Azure/go-autorest/testdata/credsutf16le.json new file mode 100644 index 000000000000..7951923e38cc Binary files /dev/null and b/vendor/github.com/Azure/go-autorest/testdata/credsutf16le.json differ diff --git a/vendor/github.com/Azure/go-autorest/testdata/credsutf8.json b/vendor/github.com/Azure/go-autorest/testdata/credsutf8.json new file mode 100644 index 000000000000..7d96bcfcb340 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/testdata/credsutf8.json @@ -0,0 +1,12 @@ +{ + "clientId": "client-id-123", + "clientSecret": "client-secret-456", + "subscriptionId": "sub-id-789", + "tenantId": "tenant-id-123", + "activeDirectoryEndpointUrl": "https://login.microsoftonline.com", + "resourceManagerEndpointUrl": "https://management.azure.com/", + "activeDirectoryGraphResourceId": "https://graph.windows.net/", + "sqlManagementEndpointUrl": "https://management.core.windows.net:8443/", + "galleryEndpointUrl": "https://gallery.azure.com/", + "managementEndpointUrl": "https://management.core.windows.net/" +} diff --git a/vendor/github.com/Azure/go-autorest/version/version.go b/vendor/github.com/Azure/go-autorest/version/version.go new file mode 100644 index 000000000000..a85b1213c1d2 --- /dev/null +++ b/vendor/github.com/Azure/go-autorest/version/version.go @@ -0,0 +1,37 @@ +package version + +// Copyright 2017 Microsoft Corporation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import ( + "fmt" + "runtime" +) + +// Number contains the semantic version of this SDK. +const Number = "v10.15.3" + +var ( + userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s", + runtime.Version(), + runtime.GOARCH, + runtime.GOOS, + Number, + ) +) + +// UserAgent returns a string containing the Go version, system archityecture and OS, and the go-autorest version. +func UserAgent() string { + return userAgent +} diff --git a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt index 899129ecc465..5f14d1162ed4 100644 --- a/vendor/github.com/aws/aws-sdk-go/NOTICE.txt +++ b/vendor/github.com/aws/aws-sdk-go/NOTICE.txt @@ -1,3 +1,3 @@ AWS SDK for Go -Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. +Copyright 2015 Amazon.com, Inc. or its affiliates. All Rights Reserved. Copyright 2014-2015 Stripe, Inc. 
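The go-autorest `logger` package vendored above is configured entirely from the environment and exposes `Filter` hooks for scrubbing request and response content before it is written. A minimal sketch of how a consumer might use it, assuming the import path implied by the vendor directory and placeholder URL/header values (only `Filter`, `Instance`, `Level`, and the `AZURE_GO_SDK_LOG_*` variables come from the code above):

```go
package main

import (
	"net/http"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	// The package configures itself from the process environment at init time:
	//   AZURE_GO_SDK_LOG_LEVEL=debug            ("debug" also logs request/response bodies)
	//   AZURE_GO_SDK_LOG_FILE=/tmp/autorest.log ("stdout", a file path, or unset for stderr)
	if logger.Level() < logger.LogInfo {
		return // logging not enabled, nothing will be written
	}

	req, err := http.NewRequest(http.MethodGet, "https://example.com/resource", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer placeholder")

	// Redact the Authorization header but keep the entry in the log output.
	redact := logger.Filter{
		Header: func(k string, v []string) (bool, []string) {
			if k == "Authorization" {
				return true, []string{"**REDACTED**"}
			}
			return true, v
		},
	}
	logger.Instance.WriteRequest(req, redact)
}
```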
diff --git a/vendor/github.com/dgrijalva/jwt-go/LICENSE b/vendor/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 000000000000..df83a9c2f019 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 000000000000..7fc1f793cbc4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this.. + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. 
+ +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. +* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. 
+ +```go + func keyLookupFunc(*Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/vendor/github.com/dgrijalva/jwt-go/README.md b/vendor/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 000000000000..add01037796d --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,100 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. + +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. 
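As a hedged sketch of that round trip, assuming the `NewWithClaims` and `SignedString` helpers defined elsewhere in this package (they are not part of this excerpt) and a placeholder secret and claims, signing and then validating an HS256 token looks roughly like this:

```go
package main

import (
	"fmt"
	"time"

	jwt "github.com/dgrijalva/jwt-go"
)

func main() {
	key := []byte("placeholder-hmac-secret") // HMAC methods take []byte keys

	// Build and sign a token with HS256.
	token := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{
		"user": "alice",
		"exp":  time.Now().Add(time.Hour).Unix(),
	})
	signed, err := token.SignedString(key)
	if err != nil {
		panic(err)
	}

	// Parse and validate it, checking the alg before handing back the key.
	parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
		if _, ok := t.Method.(*jwt.SigningMethodHMAC); !ok {
			return nil, fmt.Errorf("unexpected signing method: %v", t.Header["alg"])
		}
		return key, nil
	})
	if err != nil || !parsed.Valid {
		panic(err)
	}
	fmt.Println(parsed.Claims.(jwt.MapClaims)["user"]) // alice
}
```

The type assertion on `t.Method` is the `alg` check called out in the security notice above.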
+ +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. 
Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 000000000000..6370298313a6 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. 
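As a hedged sketch of the `ParseUnverified` addition above (its signature comes from the vendored `parser.go` later in this diff; the token literal is a placeholder whose payload only encodes `{"iss":"example"}` with a dummy signature):

```go
package main

import (
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// inspectIssuer peeks at the "iss" claim without verifying the signature.
// Only appropriate when the signature has already been checked elsewhere.
func inspectIssuer(tokenString string) (string, error) {
	parser := new(jwt.Parser)
	token, _, err := parser.ParseUnverified(tokenString, jwt.MapClaims{})
	if err != nil {
		return "", err
	}
	iss, _ := token.Claims.(jwt.MapClaims)["iss"].(string)
	return iss, nil
}

func main() {
	// Placeholder token: header {"alg":"HS256","typ":"JWT"}, payload {"iss":"example"}, dummy signature.
	iss, err := inspectIssuer("eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJpc3MiOiJleGFtcGxlIn0.c2ln")
	fmt.Println(iss, err)
}
```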
+ +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. +* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. 
The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. + +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. 
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/vendor/github.com/dgrijalva/jwt-go/claims.go b/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 000000000000..f0228f02e033 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. + if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/vendor/github.com/dgrijalva/jwt-go/doc.go b/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 000000000000..a86dc1a3b348 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. +package jwt diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 000000000000..f977381240e3 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { 
+ return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 000000000000..d19624b7264f --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/errors.go b/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 000000000000..1c93024aad2e --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... 
constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/vendor/github.com/dgrijalva/jwt-go/hmac.go b/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 000000000000..addbe5d40182 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. +func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. 
+// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 000000000000..291213c460d4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/vendor/github.com/dgrijalva/jwt-go/none.go b/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 000000000000..f04d189d067b --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + // If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/vendor/github.com/dgrijalva/jwt-go/parser.go b/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 000000000000..d6901d9adb52 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. 
+func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa.go b/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 000000000000..e4caf1ca4a11 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. 
+func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 000000000000..10ee9db8a4ed --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + 
hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 000000000000..a5ababf956c4 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, 
ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 000000000000..ed1f212b21e1 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. +type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/vendor/github.com/dgrijalva/jwt-go/token.go b/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 000000000000..d637e0867c65 --- /dev/null +++ b/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. 
Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. +// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/vendor/github.com/digitalocean/godo/LICENSE.txt b/vendor/github.com/digitalocean/godo/LICENSE.txt index 47f978eac1a5..43c5d2eee7d9 100644 --- a/vendor/github.com/digitalocean/godo/LICENSE.txt +++ b/vendor/github.com/digitalocean/godo/LICENSE.txt @@ -52,3 +52,4 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + diff --git a/vendor/github.com/ghodss/yaml/LICENSE b/vendor/github.com/ghodss/yaml/LICENSE new file mode 100644 index 000000000000..7805d36de730 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/LICENSE @@ -0,0 +1,50 @@ +The MIT License (MIT) + +Copyright (c) 2014 Sam Ghods + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/ghodss/yaml/README.md b/vendor/github.com/ghodss/yaml/README.md new file mode 100644 index 000000000000..0200f75b4d12 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/README.md @@ -0,0 +1,121 @@ +# YAML marshaling and unmarshaling support for Go + +[![Build Status](https://travis-ci.org/ghodss/yaml.svg)](https://travis-ci.org/ghodss/yaml) + +## Introduction + +A wrapper around [go-yaml](https://github.com/go-yaml/yaml) designed to enable a better way of handling YAML when marshaling to and from structs. + +In short, this library first converts YAML to JSON using go-yaml and then uses `json.Marshal` and `json.Unmarshal` to convert to or from the struct. This means that it effectively reuses the JSON struct tags as well as the custom JSON methods `MarshalJSON` and `UnmarshalJSON` unlike go-yaml. For a detailed overview of the rationale behind this method, [see this blog post](http://ghodss.com/2014/the-right-way-to-handle-yaml-in-golang/). + +## Compatibility + +This package uses [go-yaml](https://github.com/go-yaml/yaml) and therefore supports [everything go-yaml supports](https://github.com/go-yaml/yaml#compatibility). + +## Caveats + +**Caveat #1:** When using `yaml.Marshal` and `yaml.Unmarshal`, binary data should NOT be preceded with the `!!binary` YAML tag. If you do, go-yaml will convert the binary data from base64 to native binary data, which is not compatible with JSON. You can still use binary in your YAML files though - just store them without the `!!binary` tag and decode the base64 in your code (e.g. 
in the custom JSON methods `MarshalJSON` and `UnmarshalJSON`). This also has the benefit that your YAML and your JSON binary data will be decoded exactly the same way. As an example: + +``` +BAD: + exampleKey: !!binary gIGC + +GOOD: + exampleKey: gIGC +... and decode the base64 data in your code. +``` + +**Caveat #2:** When using `YAMLToJSON` directly, maps with keys that are maps will result in an error since this is not supported by JSON. This error will occur in `Unmarshal` as well since you can't unmarshal map keys anyways since struct fields can't be keys. + +## Installation and usage + +To install, run: + +``` +$ go get github.com/ghodss/yaml +``` + +And import using: + +``` +import "github.com/ghodss/yaml" +``` + +Usage is very similar to the JSON library: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +type Person struct { + Name string `json:"name"` // Affects YAML field names too. + Age int `json:"age"` +} + +func main() { + // Marshal a Person struct to YAML. + p := Person{"John", 30} + y, err := yaml.Marshal(p) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + age: 30 + name: John + */ + + // Unmarshal the YAML back into a Person struct. + var p2 Person + err = yaml.Unmarshal(y, &p2) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(p2) + /* Output: + {John 30} + */ +} +``` + +`yaml.YAMLToJSON` and `yaml.JSONToYAML` methods are also available: + +```go +package main + +import ( + "fmt" + + "github.com/ghodss/yaml" +) + +func main() { + j := []byte(`{"name": "John", "age": 30}`) + y, err := yaml.JSONToYAML(j) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(y)) + /* Output: + name: John + age: 30 + */ + j2, err := yaml.YAMLToJSON(y) + if err != nil { + fmt.Printf("err: %v\n", err) + return + } + fmt.Println(string(j2)) + /* Output: + {"age":30,"name":"John"} + */ +} +``` diff --git a/vendor/github.com/ghodss/yaml/fields.go b/vendor/github.com/ghodss/yaml/fields.go new file mode 100644 index 000000000000..58600740266c --- /dev/null +++ b/vendor/github.com/ghodss/yaml/fields.go @@ -0,0 +1,501 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. +package yaml + +import ( + "bytes" + "encoding" + "encoding/json" + "reflect" + "sort" + "strings" + "sync" + "unicode" + "unicode/utf8" +) + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +// if it encounters an Unmarshaler, indirect stops and returns that. +// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. +func indirect(v reflect.Value, decodingNull bool) (json.Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. 
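To make the binary-data caveat concrete: because the library round-trips through encoding/json, a []byte struct field picks up JSON's standard base64 handling, so an untagged base64 scalar decodes straight to raw bytes and re-encodes the same way. A small sketch using the exampleKey value from the snippet above:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

// Doc mirrors the "exampleKey: gIGC" snippet: the []byte field relies on
// encoding/json's base64 handling once the YAML has been converted to JSON.
type Doc struct {
	ExampleKey []byte `json:"exampleKey"`
}

func main() {
	var d Doc
	if err := yaml.Unmarshal([]byte("exampleKey: gIGC\n"), &d); err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", d.ExampleKey) // 80 81 82

	out, err := yaml.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // exampleKey: gIGC
}
```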
+ if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { + break + } + if v.IsNil() { + if v.CanSet() { + v.Set(reflect.New(v.Type().Elem())) + } else { + v = reflect.New(v.Type().Elem()) + } + } + if v.Type().NumMethod() > 0 { + if u, ok := v.Interface().(json.Unmarshaler); ok { + return u, nil, reflect.Value{} + } + if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { + return nil, u, reflect.Value{} + } + } + v = v.Elem() + } + return nil, nil, v +} + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from json tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that JSON should recognize for the given type. +// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" { // unexported + continue + } + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + name, opts := parseTag(tag) + if !isValidTag(name) { + name = "" + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + quoted: opts.Contains("string"), + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with JSON tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// JSON tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. +func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
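The dominance rules implemented by typeFields and dominantField mirror encoding/json, so the behavior is easiest to see with the standard library directly; the same resolution applies here once YAML has been converted to JSON. A small sketch:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Embedded struct {
	Name string `json:"name"`
}

type Outer struct {
	Embedded
	Name string `json:"name"` // shallower field dominates the embedded one
}

func main() {
	var o Outer
	if err := json.Unmarshal([]byte(`{"name":"top-level"}`), &o); err != nil {
		panic(err)
	}
	// The promoted Outer.Name wins; Embedded.Name is left untouched.
	fmt.Printf("%q %q\n", o.Name, o.Embedded.Name) // "top-level" ""
}
```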
+ f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. +// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. 
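The special handling of k/K and s/S exists because simple Unicode case folding maps them to the Kelvin sign and the long s, which bytes.EqualFold also honors. A quick illustration:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// 'K' (U+212A, the Kelvin sign) folds to 'k', and 'ſ' (U+017F) folds to 's',
	// which is why keys containing k/K/s/S need the slower fold path above.
	fmt.Println(bytes.EqualFold([]byte("kelvin"), []byte("\u212aelvin")))   // true
	fmt.Println(bytes.EqualFold([]byte("estimate"), []byte("e\u017ftimate"))) // true
}
```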
+func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. +func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} + +// tagOptions is the string following a comma in a struct field's "json" +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +// parseTag splits a struct field's json tag into its name and +// comma-separated options. +func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +// Contains reports whether a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. +func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/github.com/ghodss/yaml/yaml.go b/vendor/github.com/ghodss/yaml/yaml.go new file mode 100644 index 000000000000..6e7f14fc7fb7 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml.go @@ -0,0 +1,305 @@ +package yaml + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "strconv" + + "gopkg.in/yaml.v2" +) + +// Marshals the object into JSON then converts JSON to YAML and returns the +// YAML. +func Marshal(o interface{}) ([]byte, error) { + j, err := json.Marshal(o) + if err != nil { + return nil, fmt.Errorf("error marshaling into JSON: %v", err) + } + + y, err := JSONToYAML(j) + if err != nil { + return nil, fmt.Errorf("error converting JSON to YAML: %v", err) + } + + return y, nil +} + +// JSONOpt is a decoding option for decoding from JSON format. +type JSONOpt func(*json.Decoder) *json.Decoder + +// Unmarshal converts YAML to JSON then uses JSON to unmarshal into an object, +// optionally configuring the behavior of the JSON unmarshal. +func Unmarshal(y []byte, o interface{}, opts ...JSONOpt) error { + vo := reflect.ValueOf(o) + j, err := yamlToJSON(y, &vo, yaml.Unmarshal) + if err != nil { + return fmt.Errorf("error converting YAML to JSON: %v", err) + } + + err = jsonUnmarshal(bytes.NewReader(j), o, opts...) + if err != nil { + return fmt.Errorf("error unmarshaling JSON: %v", err) + } + + return nil +} + +// jsonUnmarshal unmarshals the JSON byte stream from the given reader into the +// object, optionally applying decoder options prior to decoding. We are not +// using json.Unmarshal directly as we want the chance to pass in non-default +// options. +func jsonUnmarshal(r io.Reader, o interface{}, opts ...JSONOpt) error { + d := json.NewDecoder(r) + for _, opt := range opts { + d = opt(d) + } + if err := d.Decode(&o); err != nil { + return fmt.Errorf("while decoding JSON: %v", err) + } + return nil +} + +// Convert JSON to YAML. 
+func JSONToYAML(j []byte) ([]byte, error) { + // Convert the JSON to an object. + var jsonObj interface{} + // We are using yaml.Unmarshal here (instead of json.Unmarshal) because the + // Go JSON library doesn't try to pick the right number type (int, float, + // etc.) when unmarshalling to interface{}, it just picks float64 + // universally. go-yaml does go through the effort of picking the right + // number type, so we can preserve number type throughout this process. + err := yaml.Unmarshal(j, &jsonObj) + if err != nil { + return nil, err + } + + // Marshal this object into YAML. + return yaml.Marshal(jsonObj) +} + +// YAMLToJSON converts YAML to JSON. Since JSON is a subset of YAML, +// passing JSON through this method should be a no-op. +// +// Things YAML can do that are not supported by JSON: +// * In YAML you can have binary and null keys in your maps. These are invalid +// in JSON. (int and float keys are converted to strings.) +// * Binary data in YAML with the !!binary tag is not supported. If you want to +// use binary data with this library, encode the data as base64 as usual but do +// not use the !!binary tag in your YAML. This will ensure the original base64 +// encoded data makes it all the way through to the JSON. +// +// For strict decoding of YAML, use YAMLToJSONStrict. +func YAMLToJSON(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.Unmarshal) +} + +// YAMLToJSONStrict is like YAMLToJSON but enables strict YAML decoding, +// returning an error on any duplicate field names. +func YAMLToJSONStrict(y []byte) ([]byte, error) { + return yamlToJSON(y, nil, yaml.UnmarshalStrict) +} + +func yamlToJSON(y []byte, jsonTarget *reflect.Value, yamlUnmarshal func([]byte, interface{}) error) ([]byte, error) { + // Convert the YAML to an object. + var yamlObj interface{} + err := yamlUnmarshal(y, &yamlObj) + if err != nil { + return nil, err + } + + // YAML objects are not completely compatible with JSON objects (e.g. you + // can have non-string keys in YAML). So, convert the YAML-compatible object + // to a JSON-compatible object, failing with an error if irrecoverable + // incompatibilties happen along the way. + jsonObj, err := convertToJSONableObject(yamlObj, jsonTarget) + if err != nil { + return nil, err + } + + // Convert this object to JSON and return the data. + return json.Marshal(jsonObj) +} + +func convertToJSONableObject(yamlObj interface{}, jsonTarget *reflect.Value) (interface{}, error) { + var err error + + // Resolve jsonTarget to a concrete value (i.e. not a pointer or an + // interface). We pass decodingNull as false because we're not actually + // decoding into the value, we're just checking if the ultimate target is a + // string. + if jsonTarget != nil { + ju, tu, pv := indirect(*jsonTarget, false) + // We have a JSON or Text Umarshaler at this level, so we can't be trying + // to decode into a string. + if ju != nil || tu != nil { + jsonTarget = nil + } else { + jsonTarget = &pv + } + } + + // If yamlObj is a number or a boolean, check if jsonTarget is a string - + // if so, coerce. Else return normal. + // If yamlObj is a map or array, find the field that each key is + // unmarshaling to, and when you recurse pass the reflect.Value for that + // field back into this function. + switch typedYAMLObj := yamlObj.(type) { + case map[interface{}]interface{}: + // JSON does not support arbitrary keys in a map, so we must convert + // these keys to strings. 
+ // + // From my reading of go-yaml v2 (specifically the resolve function), + // keys can only have the types string, int, int64, float64, binary + // (unsupported), or null (unsupported). + strMap := make(map[string]interface{}) + for k, v := range typedYAMLObj { + // Resolve the key to a string first. + var keyString string + switch typedKey := k.(type) { + case string: + keyString = typedKey + case int: + keyString = strconv.Itoa(typedKey) + case int64: + // go-yaml will only return an int64 as a key if the system + // architecture is 32-bit and the key's value is between 32-bit + // and 64-bit. Otherwise the key type will simply be int. + keyString = strconv.FormatInt(typedKey, 10) + case float64: + // Stolen from go-yaml to use the same conversion to string as + // the go-yaml library uses to convert float to string when + // Marshaling. + s := strconv.FormatFloat(typedKey, 'g', -1, 32) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + keyString = s + case bool: + if typedKey { + keyString = "true" + } else { + keyString = "false" + } + default: + return nil, fmt.Errorf("Unsupported map key of type: %s, key: %+#v, value: %+#v", + reflect.TypeOf(k), k, v) + } + + // jsonTarget should be a struct or a map. If it's a struct, find + // the field it's going to map to and pass its reflect.Value. If + // it's a map, find the element type of the map and pass the + // reflect.Value created from that type. If it's neither, just pass + // nil - JSON conversion will error for us if it's a real issue. + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Struct { + keyBytes := []byte(keyString) + // Find the field that the JSON library would use. + var f *field + fields := cachedTypeFields(t.Type()) + for i := range fields { + ff := &fields[i] + if bytes.Equal(ff.nameBytes, keyBytes) { + f = ff + break + } + // Do case-insensitive comparison. + if f == nil && ff.equalFold(ff.nameBytes, keyBytes) { + f = ff + } + } + if f != nil { + // Find the reflect.Value of the most preferential + // struct field. + jtf := t.Field(f.index[0]) + strMap[keyString], err = convertToJSONableObject(v, &jtf) + if err != nil { + return nil, err + } + continue + } + } else if t.Kind() == reflect.Map { + // Create a zero value of the map's element type to use as + // the JSON target. + jtv := reflect.Zero(t.Type().Elem()) + strMap[keyString], err = convertToJSONableObject(v, &jtv) + if err != nil { + return nil, err + } + continue + } + } + strMap[keyString], err = convertToJSONableObject(v, nil) + if err != nil { + return nil, err + } + } + return strMap, nil + case []interface{}: + // We need to recurse into arrays in case there are any + // map[interface{}]interface{}'s inside and to convert any + // numbers to strings. + + // If jsonTarget is a slice (which it really should be), find the + // thing it's going to map to. If it's not a slice, just pass nil + // - JSON conversion will error for us if it's a real issue. + var jsonSliceElemValue *reflect.Value + if jsonTarget != nil { + t := *jsonTarget + if t.Kind() == reflect.Slice { + // By default slices point to nil, but we need a reflect.Value + // pointing to a value of the slice type, so we create one here. + ev := reflect.Indirect(reflect.New(t.Type().Elem())) + jsonSliceElemValue = &ev + } + } + + // Make and use a new array. 
+ arr := make([]interface{}, len(typedYAMLObj)) + for i, v := range typedYAMLObj { + arr[i], err = convertToJSONableObject(v, jsonSliceElemValue) + if err != nil { + return nil, err + } + } + return arr, nil + default: + // If the target type is a string and the YAML type is a number, + // convert the YAML type to a string. + if jsonTarget != nil && (*jsonTarget).Kind() == reflect.String { + // Based on my reading of go-yaml, it may return int, int64, + // float64, or uint64. + var s string + switch typedVal := typedYAMLObj.(type) { + case int: + s = strconv.FormatInt(int64(typedVal), 10) + case int64: + s = strconv.FormatInt(typedVal, 10) + case float64: + s = strconv.FormatFloat(typedVal, 'g', -1, 32) + case uint64: + s = strconv.FormatUint(typedVal, 10) + case bool: + if typedVal { + s = "true" + } else { + s = "false" + } + } + if len(s) > 0 { + yamlObj = interface{}(s) + } + } + return yamlObj, nil + } + + return nil, nil +} diff --git a/vendor/github.com/ghodss/yaml/yaml_go110.go b/vendor/github.com/ghodss/yaml/yaml_go110.go new file mode 100644 index 000000000000..ab3e06a222a6 --- /dev/null +++ b/vendor/github.com/ghodss/yaml/yaml_go110.go @@ -0,0 +1,14 @@ +// This file contains changes that are only compatible with go 1.10 and onwards. + +// +build go1.10 + +package yaml + +import "encoding/json" + +// DisallowUnknownFields configures the JSON decoder to error out if unknown +// fields come along, instead of dropping them by default. +func DisallowUnknownFields(d *json.Decoder) *json.Decoder { + d.DisallowUnknownFields() + return d +} diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE new file mode 100644 index 000000000000..f57de90da8ac --- /dev/null +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -0,0 +1,35 @@ +Copyright (c) 2013, The GoGo Authors. All rights reserved. + +Protocol Buffers for Go with Gadgets + +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
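The JSONOpt hook and the go1.10 DisallowUnknownFields helper above combine to give a strict decode mode. A minimal sketch, with a hypothetical Config type and YAML input chosen only for illustration:

```go
package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

type Config struct {
	Name string `json:"name"`
}

func main() {
	data := []byte("name: web\nreplicas: 3\n")

	var lax Config
	fmt.Println(yaml.Unmarshal(data, &lax)) // <nil>: unknown keys are dropped by default

	var strict Config
	// Passing the DisallowUnknownFields option makes the JSON decoder reject "replicas".
	fmt.Println(yaml.Unmarshal(data, &strict, yaml.DisallowUnknownFields))
}
```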
+ diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile new file mode 100644 index 000000000000..00d65f327732 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C test_proto + make -C proto3_proto + make diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 000000000000..a26b046d94f1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,258 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
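Merge and Clone above follow the usual proto semantics: repeated fields are appended, set scalars overwrite, and clones are deep. A minimal sketch, assuming the well-known FieldMask message from the gogo types package is available (chosen only because its Paths field is repeated):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types" // assumed well-known types package
)

func main() {
	dst := &types.FieldMask{Paths: []string{"name"}}
	src := &types.FieldMask{Paths: []string{"emails"}}

	// Merge appends elements of repeated fields, per the rules above.
	proto.Merge(dst, src)
	fmt.Println(dst.Paths) // [name emails]

	// Clone returns a deep copy; mutating it leaves the original untouched.
	cp := proto.Clone(dst).(*types.FieldMask)
	cp.Paths = append(cp.Paths, "id")
	fmt.Println(len(dst.Paths), len(cp.Paths)) // 2 3
}
```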
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go new file mode 100644 index 000000000000..24552483c6ce --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go @@ -0,0 +1,39 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "reflect" + +type custom interface { + Marshal() ([]byte, error) + Unmarshal(data []byte) error + Size() int +} + +var customType = reflect.TypeOf((*custom)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 000000000000..d9aa3c42d666 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,428 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
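The varint layout described above is easy to check by hand: 300 encodes as 0xAC 0x02. A small sketch using the package-level helpers:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	// 300 is emitted little-endian in 7-bit groups, with the continuation
	// bit set on every byte but the last.
	buf := []byte{0xAC, 0x02}

	x, n := proto.DecodeVarint(buf)
	fmt.Println(x, n) // 300 2

	fmt.Printf("% x\n", proto.EncodeVarint(300)) // ac 02
}
```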
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
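+//
+// For illustration: the input 0x03 'f' 'o' 'o' begins with the varint
+// length 3 followed by the three payload bytes, so DecodeRawBytes returns
+// []byte("foo") and leaves the index just past the payload. With alloc set,
+// the payload is copied; otherwise the returned slice aliases p.buf.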
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. 
This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go new file mode 100644 index 000000000000..fe1bd7d904e2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. +func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. 
+ emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + default: // E.g., *pb.T + discardInfo := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + discardInfo.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? + dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 000000000000..93464c91cffb --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000000..e748e1730e1c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,49 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 000000000000..4c35d337380c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,218 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. 
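+//
+// For illustration: EncodeFixed64(1) appends the eight little-endian bytes
+// 01 00 00 00 00 00 00 00 to the buffer.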
+func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 000000000000..0f5fb173e9fd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,33 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 000000000000..d4db5a1c1457 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. 
Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. 
+ if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 000000000000..44ebd457cf61 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,604 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. 
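+//
+// For illustration: a message generated by the current compiler (which embeds
+// XXX_InternalExtensions) satisfies extendableProto directly; one generated by
+// the previous compiler (exposing ExtensionMap) is wrapped in extensionAdapter;
+// and a message that stores its extensions as raw bytes is wrapped in
+// slowExtensionAdapter. Anything else yields errNotExtendable.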
+func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + case extensionsBytes: + return slowExtensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. 
+ desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if ea, ok := pbi.(slowExtensionAdapter); ok { + pbi = ea.extensionsBytes + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, ok := pb.(extensionsBytes); ok { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension retrieves a proto2 extended field from pb. 
+// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + return decodeExtensionFromBytes(extension, *ext) + } + + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if cerr := checkExtensionTypes(epb, extension); cerr != nil { + return nil, cerr + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. 
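+//
+// The slice b holds one or more complete key/value pairs for the extension,
+// each starting with its tag varint. For illustration, assuming an extension
+// whose ExtensionType is *int32 and whose field number is 123, b would begin
+// with the varint tag 123<<3|0 (wire type 0) followed by the varint value.
+// The loop below keeps unmarshaling until b is exhausted, so a repeated
+// extension (slice ExtensionType) accumulates every occurrence.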
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, ok := pb.(extensionsBytes); ok { + newb, err := encodeExtension(extension, value) + if err != nil { + return err + } + bb := epb.GetExtensions() + *bb = append(*bb, newb...) + return nil + } + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. 
+func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 000000000000..53ebd8cca01c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,368 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
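For orientation, here is a minimal usage sketch of the extension accessors vendored above (SetExtension, HasExtension, GetExtension, ClearExtension). The generated package pb, its message pb.MyMessage, and the descriptor pb.E_MyName are assumptions made up for illustration only.

    package main

    import (
        "log"

        proto "github.com/gogo/protobuf/proto"
        pb "example.com/project/pb" // hypothetical generated code
    )

    func main() {
        msg := &pb.MyMessage{}

        // Attach a string extension; E_MyName is a generated *proto.ExtensionDesc.
        if err := proto.SetExtension(msg, pb.E_MyName, proto.String("hello")); err != nil {
            log.Fatal(err)
        }

        // Check for and read the extension back; proto2 scalar extensions
        // come back as pointers, so a string extension is a *string.
        if proto.HasExtension(msg, pb.E_MyName) {
            v, err := proto.GetExtension(msg, pb.E_MyName)
            if err != nil {
                log.Fatal(err)
            }
            log.Println(*(v.(*string)))
        }

        // Remove it again.
        proto.ClearExtension(msg, pb.E_MyName)
    }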
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "sort" + "strings" + "sync" +) + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +type slowExtensionAdapter struct { + extensionsBytes +} + +func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension { + panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.") +} + +func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + b := s.GetExtensions() + m, err := BytesToExtensionsMap(*b) + if err != nil { + panic(err) + } + return m, notLocker{} +} + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + if err := this.Encode(); err != nil { + return false + } + if err := that.Encode(); err != nil { + return false + } + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + if err := this.Encode(); err != nil { + return 1 + } + if err := that.Encode(); err != nil { + return -1 + } + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + info := getMarshalInfo(reflect.TypeOf(m)) + return info.sizeV1Extensions(m.extensionsWrite()) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + o := 0 + for _, e := range m { + if err := e.Encode(); err != nil { + return 0, err + } + n := copy(data[o:], e.enc) + if n != len(e.enc) { + return 0, io.ErrShortBuffer + } + o += n + } + return o, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + e := m[id] + if err := e.Encode(); err != nil { + return nil, err + } + return e.enc, nil +} + +func size(buf []byte, wire int) (int, error) 
{ + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) { + u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType)) + ei := u.getExtElemInfo(extension) + v := value + p := toAddrPointer(&v, ei.isptr) + siz := ei.sizer(p, SizeVarint(ei.wiretag)) + buf := make([]byte, 0, siz) + return ei.marshaler(buf, p, ei.wiretag, false) +} + +func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) { + o := 0 + for o < len(buf) { + tag, n := DecodeVarint((buf)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + if o+n > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + l, err := size((buf)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + if o+n+l > len(buf) { + return nil, fmt.Errorf("unable to decode extension") + } + v, err := decodeExtension((buf)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) +} + +func (this *Extension) Encode() error { + if this.enc == nil { + var err error + this.enc, err = encodeExtension(this.desc, this.value) + if err != nil { + return err + } + } + return nil +} + +func (this Extension) GoString() string { + if err := this.Encode(); err != nil { + return fmt.Sprintf("error encoding extension: %v", err) + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, 
fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 000000000000..37d17813288b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,929 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+ - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + 
data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "errors" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +var errInvalidUTF8 = errors.New("proto: invalid UTF-8 string") + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. +// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. 
+// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
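As a usage note for the Buffer and SetDeterministic plumbing above, callers who need stable map ordering typically marshal through a Buffer rather than the package-level Marshal. A short sketch, with msg standing for any generated Message:

    import proto "github.com/gogo/protobuf/proto"

    // deterministicMarshal encodes msg with map entries sorted by key,
    // as described for Buffer.SetDeterministic above.
    func deterministicMarshal(msg proto.Message) ([]byte, error) {
        buf := proto.NewBuffer(nil)
        buf.SetDeterministic(true)
        if err := buf.Marshal(msg); err != nil {
            return nil, err
        }
        return buf.Bytes(), nil
    }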
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or T or []*T or []T + switch f.Kind() { + case reflect.Struct: + setDefaults(f, recur, zeros) + + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.Kind() == reflect.Ptr && e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
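A brief sketch of SetDefaults in action, reusing the pb.Test message from the lib.go package comment (whose type field declares default=77); the package path and surrounding function are assumed for illustration:

    // applyDefaults shows SetDefaults filling in declared defaults on unset fields.
    func applyDefaults() {
        msg := &pb.Test{Label: proto.String("hello")} // Type deliberately left unset
        proto.SetDefaults(msg)
        // msg.Type now points at an int32 holding 77, the proto-declared
        // default; unset fields without a declared default stay nil.
        _ = msg
    }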
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Struct: + nestedMessage = true // non-nullable + + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr, reflect.Struct: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 000000000000..b3aa39190a13 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,50 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "encoding/json" + "strconv" +) + +type Sizer interface { + Size() int +} + +type ProtoSizer interface { + ProtoSize() int +} + +func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) { + s, ok := m[value] + if !ok { + s = strconv.Itoa(int(value)) + } + return json.Marshal(s) +} diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go new file mode 100644 index 000000000000..3b6ca41d5e55 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/message_set.go @@ -0,0 +1,314 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "sync" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + return marshalMessageSet(exts, false) +} + +// marshaMessageSet implements above function, with the opt to turn on / off deterministic during Marshal. +func marshalMessageSet(exts interface{}, deterministic bool) ([]byte, error) { + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var u marshalInfo + siz := u.sizeMessageSet(exts) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, exts, deterministic) + + case map[int32]Extension: + // This is an old-style extension map. + // Wrap it in a new-style XXX_InternalExtensions. + ie := XXX_InternalExtensions{ + p: &struct { + mu sync.Mutex + extensionMap map[int32]Extension + }{ + extensionMap: exts, + }, + } + + var u marshalInfo + siz := u.sizeMessageSet(&ie) + b := make([]byte, 0, siz) + return u.appendMessageSet(b, &ie, deterministic) + + default: + return nil, errors.New("proto: not an extension map") + } +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. 
+func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + var mu sync.Locker + m, mu = exts.extensionsRead() + if m != nil { + // Keep the extensions map locked until we're done marshaling to prevent + // races between marshaling and unmarshaling the lazily-{en,de}coded + // values. + mu.Lock() + defer mu.Unlock() + } + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + + if i > 0 && b.Len() > 1 { + b.WriteByte(',') + } + + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
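For reference, a small sketch of the tag-plus-length framing that UnmarshalMessageSet reconstructs above, built only from the exported varint helpers; the field number in the usage comment is arbitrary:

    // frameBytesField prefixes payload with the field's tag varint
    // (field<<3 | WireBytes) and a length varint, matching the bytes
    // UnmarshalMessageSet stores back into Extension.enc.
    func frameBytesField(field int32, payload []byte) []byte {
        b := proto.EncodeVarint(uint64(field)<<3 | proto.WireBytes)
        b = append(b, proto.EncodeVarint(uint64(len(payload)))...)
        return append(b, payload...)
    }

    // Example: frameBytesField(12345, encodedMsg) frames encodedMsg as field 12345.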
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000000..b6cad90834b3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,357 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. +/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000000..7ffd3c29d90c --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,59 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" +) + +// TODO: untested, so probably incorrect. 
+ +func (p pointer) getRef() pointer { + return pointer{v: p.v.Addr()} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000000..d55a335d9453 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,308 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. +const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. 
+type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr bool) pointer { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } + // The interface is not of pointer type. The data word is the pointer + // to the data. + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 000000000000..aca8eed02a11 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,56 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func (p pointer) getRef() pointer { + return pointer{p: (unsafe.Pointer)(&p.p)} +} + +func (p pointer) appendRef(v pointer, typ reflect.Type) { + slice := p.getSlice(typ) + elem := v.asPointerTo(typ).Elem() + newSlice := reflect.Append(slice, elem) + slice.Set(newSlice) +} + +func (p pointer) getSlice(typ reflect.Type) reflect.Value { + sliceTyp := reflect.SliceOf(typ) + slice := p.asPointerTo(sliceTyp) + slice = slice.Elem() + return slice +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 000000000000..7a5e28efe519 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,600 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. 
+const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool + + stype reflect.Type // set for struct types only + ctype reflect.Type // set for custom types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. 
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.ctype = typ + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + case reflect.Struct: + p.stype = typ + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + case reflect.Struct: + p.stype = t3 + } + case reflect.Struct: + p.stype = t2 + } + + case reflect.Map: + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. 
+ propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + _, _, _, oots = om.XXX_OneofFuncs() + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. +func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 000000000000..40ea3dd935c2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,36 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem() +var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem() diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 000000000000..5a5fd93f7c19 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go new file mode 100644 index 000000000000..b479760a84b5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go @@ -0,0 +1,2803 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. +type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements + + hassizer bool // has custom sizer + hasprotosizer bool // has custom protosizer + + bytesExtensions field // offset of XXX_extensions where the field type is []byte +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). +func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. 
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. + atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + // Uses the message's Size method if available + if u.hassizer { + s := ptr.asPointerTo(u.typ).Interface().(Sizer) + return s.Size() + } + // Uses the message's ProtoSize method if available + if u.hasprotosizer { + s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer) + return s.ProtoSize() + } + + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + n += len(s) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. +func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if u.hasmarshaler { + if deterministic { + return nil, errors.New("proto: deterministic not supported by the Marshal method of " + u.typ.String()) + } + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errreq error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + if u.bytesExtensions.IsValid() { + s := *ptr.offset(u.bytesExtensions).toBytes() + b = append(b, s...) + } + for _, f := range u.fields { + if f.required && errreq == nil { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + errreq = &RequiredNotSetError{f.name} + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + err = fmt.Errorf("proto: string field %q contains invalid UTF-8", fullName) + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errreq +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.bytesExtensions = invalidField + u.sizecache = invalidField + isOneofMessage := false + + if reflect.PtrTo(t).Implements(sizerType) { + u.hassizer = true + } + if reflect.PtrTo(t).Implements(protosizerType) { + u.hasprotosizer = true + } + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Tag.Get("protobuf_oneof") != "" { + isOneofMessage = true + } + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + if f.Type.Kind() == reflect.Map { + u.v1extensions = toField(&f) + } else { + u.bytesExtensions = toField(&f) + } + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // get oneof implementers + var oneofImplementers []interface{} + // gogo: isOneofMessage is needed for embedded oneof messages, without a marshaler and unmarshaler + if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok && isOneofMessage { + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(t, tags, false, false) + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + isptr: t.Kind() == reflect.Ptr, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. +func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" 
+ tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = 1<<31 - 1 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizr, + marshaler: marshalr, + } + } +} + +type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) +} + +// wiretype returns the wire encoding of the type. +func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
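+// For illustration (tag strings assumed): a proto3 int32 field tagged
+// "varint,2,opt,name=x,proto3" resolves to the pair
+// (sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero) so that zero values
+// are omitted, while a proto2 optional int32 (declared as *int32) resolves to
+// the Ptr variants instead.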
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + ctype := false + isTime := false + isDuration := false + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + if strings.HasPrefix(tags[i], "customtype=") { + ctype = true + } + if tags[i] == "stdtime" { + isTime = true + } + if tags[i] == "stdduration" { + isDuration = true + } + } + if !proto3 && !pointer && !slice { + nozero = false + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + if pointer { + return makeCustomPtrMarshaler(getMarshalInfo(t)) + } + return makeCustomMarshaler(getMarshalInfo(t)) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeTimePtrSliceMarshaler(getMarshalInfo(t)) + } + return makeTimePtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeTimeSliceMarshaler(getMarshalInfo(t)) + } + return makeTimeMarshaler(getMarshalInfo(t)) + } + + if isDuration { + if pointer { + if slice { + return makeDurationPtrSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationPtrMarshaler(getMarshalInfo(t)) + } + if slice { + return makeDurationSliceMarshaler(getMarshalInfo(t)) + } + return makeDurationMarshaler(getMarshalInfo(t)) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case 
"zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. + // We want to marshal it as a oneof field. Do this + // check before the proto3 check. 
+ return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if pointer { + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } else { + if slice { + return makeMessageRefSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageRefMarshaler(getMarshalInfo(t)) + } + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize 
int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return 
n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} 
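+// Note on the zigzag expressions used by the sizers above: the expression
+// (uint32(v)<<1)^uint32(int32(v)>>31) maps signed values to small unsigned
+// ones so that numbers near zero encode as short varints:
+// 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on.
+// The 64-bit variants apply the same trick with a 63-bit arithmetic shift.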
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. 
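+ // Worked illustration: each byte below carries seven payload bits, least
+ // significant group first, with the high bit set on every byte except the
+ // last. For example, v = 300 (binary 1_0010_1100) is appended as the two
+ // bytes 0xAC, 0x02.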
+ switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, 
wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, 
nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() 
+ if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func 
appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + 
return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. 
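+// Note: a group is framed by a start-group/end-group tag pair rather than a
+// length prefix, which is why the sizer below adds 2*tagsize and no length
+// varint.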
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. +func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. +func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err, errreq error + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, errreq + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. 
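+// Note: on the wire each map entry is encoded like a nested message whose
+// field 1 holds the key and field 2 holds the value, which is where the
+// 1<<3 and 2<<3 wire tags computed below come from.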
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + tags := strings.Split(f.Tag.Get("protobuf"), ",") + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tags { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + } + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if err != nil { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != nil && err != ErrNil { // allow nil value in map + return b, err + } + } + return b, nil + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. +// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. 
+ t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. +func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). 
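+// Note: per item the sizer below counts two bytes for the start/end group
+// tags of field 1, a varint plus one tag byte for the type_id (field 2), and
+// the length-delimited message plus one tag byte for field 3.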
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if err != nil { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if err != nil { + return b, err + } + } + return b, nil +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if err != nil { + return b, err + } + } + return b, nil +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ var b []byte + b, err = m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go new file mode 100644 index 000000000000..997f57c1e102 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go @@ -0,0 +1,388 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
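[Editor's aside, not part of the vendored patch.] The Size, Marshal, and Buffer.Marshal entry points above are what callers and generated code route through; when a message has the generated XXX_Size/XXX_Marshal methods, the table-driven fast path is used, otherwise the reflection fallback. A minimal usage sketch, assuming the gogo well-known type github.com/gogo/protobuf/types.Timestamp is available as a concrete generated message (any generated message would do):

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	"github.com/gogo/protobuf/types"
)

func main() {
	// types.Timestamp is a generated message, so proto.Size and
	// proto.Marshal take the table-driven XXX_Size/XXX_Marshal path.
	msg := &types.Timestamp{Seconds: 1234567890, Nanos: 42}

	fmt.Println("encoded size:", proto.Size(msg))

	b, err := proto.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("wire bytes: % x\n", b)

	// A Buffer can be reused across messages; Buffer.Marshal appends to
	// the internal slice, growing it as needed (see grow above).
	buf := proto.NewBuffer(nil)
	if err := buf.Marshal(msg); err != nil {
		panic(err)
	}
	fmt.Println("buffer length:", len(buf.Bytes()))
}
```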
+ +package proto + +import ( + "reflect" + "time" +) + +// makeMessageRefMarshaler differs a bit from makeMessageMarshaler +// It marshal a message T instead of a *T +func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + siz := u.size(ptr) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + b = appendVarint(b, wiretag) + siz := u.cachedsize(ptr) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, ptr, deterministic) + } +} + +// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler +// It marshals a slice of messages []T instead of []*T +func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + var err, errreq error + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + e := elem.Interface() + v := toAddrPointer(&e, false) + b = appendVarint(b, wiretag) + siz := u.size(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if err != nil { + if _, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errreq == nil { + errreq = err + } + continue + } + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + + return b, errreq + } +} + +func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + return b, nil + } +} + +func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(u.typ).Interface().(custom) + siz := m.Size() + buf, err := m.Marshal() + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ return b, nil + } +} + +func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + t := ptr.asPointerTo(u.typ).Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(time.Time) + ts, err := timestampProto(t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return 0 + } + siz := Size(ts) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + t := elem.Interface().(*time.Time) + ts, err := timestampProto(*t) + if err != nil { + return nil, err + } + siz := Size(ts) + buf, err := Marshal(ts) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) 
+ } + + return b, nil + } +} + +func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + d := ptr.asPointerTo(u.typ).Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + if ptr.isNil() { + return 0 + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + return tagsize + SizeVarint(uint64(siz)) + siz + }, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + if ptr.isNil() { + return b, nil + } + d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration) + dur := durationProto(*d) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(buf))) + b = append(b, buf...) + return b, nil + } +} + +func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(u.typ) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(u.typ) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(time.Duration) + dur := durationProto(d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} + +func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + n := 0 + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getSlice(reflect.PtrTo(u.typ)) + for i := 0; i < s.Len(); i++ { + elem := s.Index(i) + d := elem.Interface().(*time.Duration) + dur := durationProto(*d) + siz := Size(dur) + buf, err := Marshal(dur) + if err != nil { + return nil, err + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(siz)) + b = append(b, buf...) + } + + return b, nil + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go new file mode 100644 index 000000000000..f520106e09f5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go @@ -0,0 +1,657 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. + merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. 
+ } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) + } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) + if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). 
+ /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + mergeInfo.merge(dst, src) + } + case isSlice: // E.g., []*pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mergeInfo.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mergeInfo := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mergeInfo.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? + mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. 
Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go new file mode 100644 index 000000000000..b6371bb56bd4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2058 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. +// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. 
+ err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. + rnse = r + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + if u.bytesExtensions.IsValid() { + z = m.offset(u.bytesExtensions).toBytes() + break + } + panic("no extensions field available") + } + } + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if rnse != nil { + // A required field of a submessage/group is missing. Return that error. + return rnse + } + if reqMask != u.reqMask { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + return &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return nil +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. +func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. 
+ u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + u.bytesExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) { + u.oldExtensions = toField(&f) + continue + } else if f.Type == reflect.TypeOf(([]byte)(nil)) { + u.bytesExtensions = toField(&f) + continue + } + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + // TODO: XXX_OneofFuncs returns more info than we need. Get rid of some of it? 
+ fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("XXX_OneofFuncs") + // gogo: len(oneofFields) > 0 is needed for embedded oneof messages, without a marshaler and unmarshaler + if fn.IsValid() && len(oneofFields) > 0 { + res := fn.Call(nil)[3] // last return value from XXX_OneofFuncs: []interface{} + for i := res.Len() - 1; i >= 0; i-- { + v := res.Index(i) // interface{} + tptr := reflect.ValueOf(v.Interface()).Type() // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. + // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + } + } + + // Get extension ranges, if any. + fn = reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + ctype := false + isTime := false + isDuration := false + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if strings.HasPrefix(tag, "customtype=") { + ctype = true + } + if tag == "stdtime" { + isTime = true + } + if tag == "stdduration" { + isDuration = true + } + } + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + if ctype { + if reflect.PtrTo(t).Implements(customType) { + if slice { + return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name) + } + if pointer { + return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalCustom(getUnmarshalInfo(t), name) + } else { + panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t)) + } + } + + if isTime { + if pointer { + if slice { + return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTimePtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalTime(getUnmarshalInfo(t), name) + } + + if isDuration { + if pointer { + if slice { + return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name) + } + if slice { + return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalDuration(getUnmarshalInfo(t), name) + } + + // We'll never have both pointer and slice for basic types. 
+ if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessage(getUnmarshalInfo(t), name) + } + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. 
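[Editor's aside, not part of the vendored patch.] The per-field helpers that follow all share one pattern: check the wire type, decode a varint or fixed-width value, then store it through the pointer abstraction. As a self-contained illustration of the varint and zigzag (sint32) step, here is a sketch that re-implements the decoding for standalone use — the package's decodeVarint helper is unexported, so the version below is an assumption-labeled stand-in that mirrors its behavior, and decodeSint32 uses exactly the zigzag expression from unmarshalSint32Value:

```go
package main

import (
	"errors"
	"fmt"
)

// decodeVarint reads a base-128 varint from b and returns the value plus the
// number of bytes consumed (0 on truncated input). Stand-in for the package's
// unexported helper of the same name.
func decodeVarint(b []byte) (uint64, int) {
	var x uint64
	for i := 0; i < len(b) && i < 10; i++ {
		x |= uint64(b[i]&0x7f) << uint(7*i)
		if b[i] < 0x80 {
			return x, i + 1
		}
	}
	return 0, 0
}

// decodeSint32 applies the zigzag transform used by the sint32 unmarshalers:
// int32(x>>1) ^ int32(x)<<31>>31, i.e. (x >> 1) ^ -(x & 1) in int32 arithmetic.
func decodeSint32(b []byte) (int32, []byte, error) {
	x, n := decodeVarint(b)
	if n == 0 {
		return 0, nil, errors.New("truncated varint")
	}
	v := int32(x>>1) ^ int32(x)<<31>>31
	return v, b[n:], nil
}

func main() {
	// zigzag(-3) == 5, which encodes as the single byte 0x05.
	v, rest, err := decodeSint32([]byte{0x05})
	fmt.Println(v, len(rest), err) // -3 0 <nil>
}
```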
+ +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 
{ + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) 
([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | 
int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. 
+ // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + +func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + 
v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + if !utf8.ValidString(v) { + return nil, errInvalidUTF8 + } + s := f.toStringSlice() + *s = append(*s, v) + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) + *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) 
+ s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + tagArray := strings.Split(f.Tag.Get("protobuf"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + for _, t := range tagArray { + if strings.HasPrefix(t, "customtype=") { + valTags = append(valTags, t) + } + if t == "stdtime" { + valTags = append(valTags, t) + } + if t == "stdduration" { + valTags = append(valTags, t) + } + } + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ",")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. 
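+		// A map entry is serialized like a little message whose field 1 is
+		// the key and whose field 2 is the value, which is why the loop
+		// below switches on x >> 3. As an illustration, a hypothetical
+		// map<string,int32> entry "a" -> 3 arrives as tag 1 with the bytes
+		// "a" followed by tag 2 with the varint 3.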
+ if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if err == nil { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nil + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. +func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if err != nil { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nil + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. 
+// Returns (-1,-1) if it can't find one. +func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. +func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) <= 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go new file mode 100644 index 000000000000..00d6c7ad9376 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go @@ -0,0 +1,385 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2018, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "io" + "reflect" +) + +func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f // gogo: changed from v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return nil, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 000000000000..4f5706dc5f36
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,928 @@
+//
Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
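+			// As an illustration, a hypothetical map<string,int32> field
+			// named "counts" holding {"a": 1} is emitted as:
+			//   counts: <
+			//     key: "a"
+			//     value: 1
+			//   >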
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv + if pv.CanAddr() { + pv = sv.Addr() + } else { + pv = reflect.New(sv.Type()) + pv.Elem().Set(sv) + } + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeAny writes an arbitrary field. 
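+// Strings and []byte values are quoted (with octal escapes for
+// non-printable bytes), special floats are written as inf, -inf or nan,
+// and nested messages are bracketed with < > (or { } for groups).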
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) + if err != nil { + return err + } + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) + } + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err + } + } + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. 
+ v = v.Addr() + } + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } 
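+// int32Slice satisfies sort.Interface; writeExtensions below sorts
+// extension IDs with it so the output order is canonical.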
+func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 000000000000..1d6c6aa0e41b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "fmt" + "reflect" +) + +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { + m, ok := enumStringMaps[props.Enum] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + key := int32(0) + if v.Kind() == reflect.Ptr { + key = int32(v.Elem().Int()) + } else { + key = int32(v.Int()) + } + s, ok := m[key] + if !ok { + if err := tm.writeAny(w, v, props); err != nil { + return err + } + } + _, err := fmt.Fprint(w, s) + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go new file mode 100644 index 000000000000..fbb000d3742a --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -0,0 +1,998 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
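+// As a rough sketch (field and extension names are hypothetical), the
+// input accepted by this parser looks like:
+//
+//	name: "widget"              # '#' starts a comment
+//	options: < ttl: 30 >        # messages use < > or { }
+//	[example.com/ext.flag]: true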
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != ";" && tok.value != "," {
+ p.back()
+ }
+ return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value == "" {
+ return p.errorf("unexpected EOF")
+ }
+ if len(props.CustomType) > 0 {
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ tc := reflect.TypeOf(new(Marshaler))
+ ok := t.Elem().Implements(tc.Elem())
+ if ok {
+ fv := v
+ flen := fv.Len()
+ if flen == fv.Cap() {
+ nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+ reflect.Copy(nav, fv)
+ fv.Set(nav)
+ }
+ fv.SetLen(flen + 1)
+
+ // Read one.
+ p.back()
+ return p.readAny(fv.Index(flen), props)
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.ValueOf(custom))
+ } else {
+ custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+ err := custom.Unmarshal([]byte(tok.unquoted))
+ if err != nil {
+ return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+ }
+ v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+ }
+ return nil
+ }
+ if props.StdTime {
+ fv := v
+ p.back()
+ props.StdTime = false
+ tproto := &timestamp{}
+ err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+ props.StdTime = true
+ if err != nil {
+ return err
+ }
+ tim, err := timestampFromProto(tproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ts := fv.Interface().([]*time.Time)
+ ts = append(ts, &tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ } else {
+ ts := fv.Interface().([]time.Time)
+ ts = append(ts, tim)
+ fv.Set(reflect.ValueOf(ts))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&tim))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+ }
+ return nil
+ }
+ if props.StdDuration {
+ fv := v
+ p.back()
+ props.StdDuration = false
+ dproto := &duration{}
+ err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+ props.StdDuration = true
+ if err != nil {
+ return err
+ }
+ dur, err := durationFromProto(dproto)
+ if err != nil {
+ return err
+ }
+ if props.Repeated {
+ t := reflect.TypeOf(v.Interface())
+ if t.Kind() == reflect.Slice {
+ if t.Elem().Kind() == reflect.Ptr {
+ ds := fv.Interface().([]*time.Duration)
+ ds = append(ds, &dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ } else {
+ ds := fv.Interface().([]time.Duration)
+ ds = append(ds, dur)
+ fv.Set(reflect.ValueOf(ds))
+ return nil
+ }
+ }
+ }
+ if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+ v.Set(reflect.ValueOf(&dur))
+ } else {
+ v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+ }
+ return nil
+ }
+ switch fv := v; fv.Kind() {
+ case reflect.Slice:
+ at := v.Type()
+ if at.Elem().Kind() == reflect.Uint8 {
+ // Special case for []byte
+ if tok.value[0] != '"' && tok.value[0] != '\'' {
+ // Deliberately written out here, as the error after
+ // this switch statement would write "invalid []byte: ...",
+ // which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000000..9324f6542bcf --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
+func validateTimestamp(ts *timestamp) error {
+ if ts == nil {
+ return errors.New("timestamp: nil Timestamp")
+ }
+ if ts.Seconds < minValidSeconds {
+ return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+ }
+ if ts.Seconds >= maxValidSeconds {
+ return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+ }
+ if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+ return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+ }
+ return nil
+}
+
+// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+ // Don't return the zero value on error, because it corresponds to a valid
+ // timestamp. Instead return whatever time.Unix gives us.
+ var t time.Time
+ if ts == nil {
+ t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+ } else {
+ t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+ }
+ return t, validateTimestamp(ts)
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+ seconds := t.Unix()
+ nanos := int32(t.Sub(time.Unix(seconds, 0)))
+ ts := &timestamp{
+ Seconds: seconds,
+ Nanos: nanos,
+ }
+ if err := validateTimestamp(ts); err != nil {
+ return nil, err
+ }
+ return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 000000000000..38439fa99013
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} diff --git a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 000000000000..ceadde6a5e10 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
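As an aside before the package body that follows (not part of the vendored file): a minimal sketch of how the sortkeys package is typically used, sorting a map's keys so its entries can be emitted in a deterministic order; this sketch assumes only the standard library and the sortkeys.Int32s helper defined below.

package main

import (
    "fmt"

    "github.com/gogo/protobuf/sortkeys"
)

func main() {
    m := map[int32]string{3: "c", 1: "a", 2: "b"}

    // Collect and sort the keys, then iterate in sorted order so the
    // output is stable across runs despite Go's randomized map iteration.
    keys := make([]int32, 0, len(m))
    for k := range m {
        keys = append(keys, k)
    }
    sortkeys.Int32s(keys)

    for _, k := range keys {
        fmt.Printf("%d=%s\n", k, m[k])
    }
}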
+ +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/golang/glog/LICENSE b/vendor/github.com/golang/glog/LICENSE new file mode 100644 index 000000000000..37ec93a14fdc --- /dev/null +++ b/vendor/github.com/golang/glog/LICENSE @@ -0,0 +1,191 @@ +Apache License +Version 2.0, January 2004 +http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + +"License" shall mean the terms and conditions for use, reproduction, and +distribution as defined by Sections 1 through 9 of this document. + +"Licensor" shall mean the copyright owner or entity authorized by the copyright +owner that is granting the License. + +"Legal Entity" shall mean the union of the acting entity and all other entities +that control, are controlled by, or are under common control with that entity. +For the purposes of this definition, "control" means (i) the power, direct or +indirect, to cause the direction or management of such entity, whether by +contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the +outstanding shares, or (iii) beneficial ownership of such entity. + +"You" (or "Your") shall mean an individual or Legal Entity exercising +permissions granted by this License. + +"Source" form shall mean the preferred form for making modifications, including +but not limited to software source code, documentation source, and configuration +files. + +"Object" form shall mean any form resulting from mechanical transformation or +translation of a Source form, including but not limited to compiled object code, +generated documentation, and conversions to other media types. 
+ +"Work" shall mean the work of authorship, whether in Source or Object form, made +available under the License, as indicated by a copyright notice that is included +in or attached to the work (an example is provided in the Appendix below). + +"Derivative Works" shall mean any work, whether in Source or Object form, that +is based on (or derived from) the Work and for which the editorial revisions, +annotations, elaborations, or other modifications represent, as a whole, an +original work of authorship. For the purposes of this License, Derivative Works +shall not include works that remain separable from, or merely link (or bind by +name) to the interfaces of, the Work and Derivative Works thereof. + +"Contribution" shall mean any work of authorship, including the original version +of the Work and any modifications or additions to that Work or Derivative Works +thereof, that is intentionally submitted to Licensor for inclusion in the Work +by the copyright owner or by an individual or Legal Entity authorized to submit +on behalf of the copyright owner. For the purposes of this definition, +"submitted" means any form of electronic, verbal, or written communication sent +to the Licensor or its representatives, including but not limited to +communication on electronic mailing lists, source code control systems, and +issue tracking systems that are managed by, or on behalf of, the Licensor for +the purpose of discussing and improving the Work, but excluding communication +that is conspicuously marked or otherwise designated in writing by the copyright +owner as "Not a Contribution." + +"Contributor" shall mean Licensor and any individual or Legal Entity on behalf +of whom a Contribution has been received by Licensor and subsequently +incorporated within the Work. + +2. Grant of Copyright License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable copyright license to reproduce, prepare Derivative Works of, +publicly display, publicly perform, sublicense, and distribute the Work and such +Derivative Works in Source or Object form. + +3. Grant of Patent License. + +Subject to the terms and conditions of this License, each Contributor hereby +grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, +irrevocable (except as stated in this section) patent license to make, have +made, use, offer to sell, sell, import, and otherwise transfer the Work, where +such license applies only to those patent claims licensable by such Contributor +that are necessarily infringed by their Contribution(s) alone or by combination +of their Contribution(s) with the Work to which such Contribution(s) was +submitted. If You institute patent litigation against any entity (including a +cross-claim or counterclaim in a lawsuit) alleging that the Work or a +Contribution incorporated within the Work constitutes direct or contributory +patent infringement, then any patent licenses granted to You under this License +for that Work shall terminate as of the date such litigation is filed. + +4. Redistribution. 
+ +You may reproduce and distribute copies of the Work or Derivative Works thereof +in any medium, with or without modifications, and in Source or Object form, +provided that You meet the following conditions: + +You must give any other recipients of the Work or Derivative Works a copy of +this License; and +You must cause any modified files to carry prominent notices stating that You +changed the files; and +You must retain, in the Source form of any Derivative Works that You distribute, +all copyright, patent, trademark, and attribution notices from the Source form +of the Work, excluding those notices that do not pertain to any part of the +Derivative Works; and +If the Work includes a "NOTICE" text file as part of its distribution, then any +Derivative Works that You distribute must include a readable copy of the +attribution notices contained within such NOTICE file, excluding those notices +that do not pertain to any part of the Derivative Works, in at least one of the +following places: within a NOTICE text file distributed as part of the +Derivative Works; within the Source form or documentation, if provided along +with the Derivative Works; or, within a display generated by the Derivative +Works, if and wherever such third-party notices normally appear. The contents of +the NOTICE file are for informational purposes only and do not modify the +License. You may add Your own attribution notices within Derivative Works that +You distribute, alongside or as an addendum to the NOTICE text from the Work, +provided that such additional attribution notices cannot be construed as +modifying the License. +You may add Your own copyright statement to Your modifications and may provide +additional or different license terms and conditions for use, reproduction, or +distribution of Your modifications, or for any such Derivative Works as a whole, +provided Your use, reproduction, and distribution of the Work otherwise complies +with the conditions stated in this License. + +5. Submission of Contributions. + +Unless You explicitly state otherwise, any Contribution intentionally submitted +for inclusion in the Work by You to the Licensor shall be under the terms and +conditions of this License, without any additional terms or conditions. +Notwithstanding the above, nothing herein shall supersede or modify the terms of +any separate license agreement you may have executed with Licensor regarding +such Contributions. + +6. Trademarks. + +This License does not grant permission to use the trade names, trademarks, +service marks, or product names of the Licensor, except as required for +reasonable and customary use in describing the origin of the Work and +reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. + +Unless required by applicable law or agreed to in writing, Licensor provides the +Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, +including, without limitation, any warranties or conditions of TITLE, +NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are +solely responsible for determining the appropriateness of using or +redistributing the Work and assume any risks associated with Your exercise of +permissions under this License. + +8. Limitation of Liability. 
+ +In no event and under no legal theory, whether in tort (including negligence), +contract, or otherwise, unless required by applicable law (such as deliberate +and grossly negligent acts) or agreed to in writing, shall any Contributor be +liable to You for damages, including any direct, indirect, special, incidental, +or consequential damages of any character arising as a result of this License or +out of the use or inability to use the Work (including but not limited to +damages for loss of goodwill, work stoppage, computer failure or malfunction, or +any and all other commercial damages or losses), even if such Contributor has +been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. + +While redistributing the Work or Derivative Works thereof, You may choose to +offer, and charge a fee for, acceptance of support, warranty, indemnity, or +other liability obligations and/or rights consistent with this License. However, +in accepting such obligations, You may act only on Your own behalf and on Your +sole responsibility, not on behalf of any other Contributor, and only if You +agree to indemnify, defend, and hold each Contributor harmless for any liability +incurred by, or claims asserted against, such Contributor by reason of your +accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work + +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/golang/glog/README b/vendor/github.com/golang/glog/README new file mode 100644 index 000000000000..387b4eb68909 --- /dev/null +++ b/vendor/github.com/golang/glog/README @@ -0,0 +1,44 @@ +glog +==== + +Leveled execution logs for Go. + +This is an efficient pure Go implementation of leveled logs in the +manner of the open source C++ package + https://github.com/google/glog + +By binding methods to booleans it is possible to use the log package +without paying the expense of evaluating the arguments to the log. +Through the -vmodule flag, the package also provides fine-grained +control over logging at the file level. + +The comment from glog.go introduces the ideas: + + Package glog implements logging analogous to the Google-internal + C++ INFO/ERROR/V setup. It provides functions Info, Warning, + Error, Fatal, plus formatting variants such as Infof. It + also provides V-style logging controlled by the -v and + -vmodule=file=2 flags. 
+ + Basic examples: + + glog.Info("Prepare to repel boarders") + + glog.Fatalf("Initialization failed: %s", err) + + See the documentation for the V function for an explanation + of these examples: + + if glog.V(2) { + glog.Info("Starting transaction...") + } + + glog.V(2).Infoln("Processed", nItems, "elements") + + +The repository contains an open source version of the log package +used inside Google. The master copy of the source lives inside +Google, not here. The code in this repo is for export only and is not itself +under development. Feature requests will be ignored. + +Send bug reports to golang-nuts@googlegroups.com. diff --git a/vendor/github.com/golang/glog/glog.go b/vendor/github.com/golang/glog/glog.go new file mode 100644 index 000000000000..54bd7afdcabe --- /dev/null +++ b/vendor/github.com/golang/glog/glog.go @@ -0,0 +1,1180 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package glog implements logging analogous to the Google-internal C++ INFO/ERROR/V setup. +// It provides functions Info, Warning, Error, Fatal, plus formatting variants such as +// Infof. It also provides V-style logging controlled by the -v and -vmodule=file=2 flags. +// +// Basic examples: +// +// glog.Info("Prepare to repel boarders") +// +// glog.Fatalf("Initialization failed: %s", err) +// +// See the documentation for the V function for an explanation of these examples: +// +// if glog.V(2) { +// glog.Info("Starting transaction...") +// } +// +// glog.V(2).Infoln("Processed", nItems, "elements") +// +// Log output is buffered and written periodically using Flush. Programs +// should call Flush before exiting to guarantee all log output is written. +// +// By default, all log statements write to files in a temporary directory. +// This package provides several flags that modify this behavior. +// As a result, flag.Parse must be called before any logging is done. +// +// -logtostderr=false +// Logs are written to standard error instead of to files. +// -alsologtostderr=false +// Logs are written to standard error as well as to files. +// -stderrthreshold=ERROR +// Log events at or above this severity are logged to standard +// error as well as to files. +// -log_dir="" +// Log files will be written to this directory instead of the +// default temporary directory. +// +// Other flags provide aids to debugging. +// +// -log_backtrace_at="" +// When set to a file and line number holding a logging statement, +// such as +// -log_backtrace_at=gopherflakes.go:234 +// a stack trace will be written to the Info log whenever execution +// hits that statement. (Unlike with -vmodule, the ".go" must be +// present.) +// -v=0 +// Enable V-leveled logging at the specified level. 
+// -vmodule="" +// The syntax of the argument is a comma-separated list of pattern=N, +// where pattern is a literal file name (minus the ".go" suffix) or +// "glob" pattern and N is a V level. For instance, +// -vmodule=gopher*=3 +// sets the V level to 3 in all Go files whose names begin "gopher". +// +package glog + +import ( + "bufio" + "bytes" + "errors" + "flag" + "fmt" + "io" + stdLog "log" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// severity identifies the sort of log: info, warning etc. It also implements +// the flag.Value interface. The -stderrthreshold flag is of type severity and +// should be modified only through the flag.Value interface. The values match +// the corresponding constants in C++. +type severity int32 // sync/atomic int32 + +// These constants identify the log levels in order of increasing severity. +// A message written to a high-severity log file is also written to each +// lower-severity log file. +const ( + infoLog severity = iota + warningLog + errorLog + fatalLog + numSeverity = 4 +) + +const severityChar = "IWEF" + +var severityName = []string{ + infoLog: "INFO", + warningLog: "WARNING", + errorLog: "ERROR", + fatalLog: "FATAL", +} + +// get returns the value of the severity. +func (s *severity) get() severity { + return severity(atomic.LoadInt32((*int32)(s))) +} + +// set sets the value of the severity. +func (s *severity) set(val severity) { + atomic.StoreInt32((*int32)(s), int32(val)) +} + +// String is part of the flag.Value interface. +func (s *severity) String() string { + return strconv.FormatInt(int64(*s), 10) +} + +// Get is part of the flag.Value interface. +func (s *severity) Get() interface{} { + return *s +} + +// Set is part of the flag.Value interface. +func (s *severity) Set(value string) error { + var threshold severity + // Is it a known name? + if v, ok := severityByName(value); ok { + threshold = v + } else { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + threshold = severity(v) + } + logging.stderrThreshold.set(threshold) + return nil +} + +func severityByName(s string) (severity, bool) { + s = strings.ToUpper(s) + for i, name := range severityName { + if name == s { + return severity(i), true + } + } + return 0, false +} + +// OutputStats tracks the number of output lines and bytes written. +type OutputStats struct { + lines int64 + bytes int64 +} + +// Lines returns the number of lines written. +func (s *OutputStats) Lines() int64 { + return atomic.LoadInt64(&s.lines) +} + +// Bytes returns the number of bytes written. +func (s *OutputStats) Bytes() int64 { + return atomic.LoadInt64(&s.bytes) +} + +// Stats tracks the number of lines of output and number of bytes +// per severity level. Values must be read with atomic.LoadInt64. +var Stats struct { + Info, Warning, Error OutputStats +} + +var severityStats = [numSeverity]*OutputStats{ + infoLog: &Stats.Info, + warningLog: &Stats.Warning, + errorLog: &Stats.Error, +} + +// Level is exported because it appears in the arguments to V and is +// the type of the v flag, which can be set programmatically. +// It's a distinct type because we want to discriminate it from logType. +// Variables of type level are only changed under logging.mu. +// The -v flag is read only with atomic ops, so the state of the logging +// module is consistent. + +// Level is treated as a sync/atomic int32. + +// Level specifies a level of verbosity for V logs. 
*Level implements +// flag.Value; the -v flag is of type Level and should be modified +// only through the flag.Value interface. +type Level int32 + +// get returns the value of the Level. +func (l *Level) get() Level { + return Level(atomic.LoadInt32((*int32)(l))) +} + +// set sets the value of the Level. +func (l *Level) set(val Level) { + atomic.StoreInt32((*int32)(l), int32(val)) +} + +// String is part of the flag.Value interface. +func (l *Level) String() string { + return strconv.FormatInt(int64(*l), 10) +} + +// Get is part of the flag.Value interface. +func (l *Level) Get() interface{} { + return *l +} + +// Set is part of the flag.Value interface. +func (l *Level) Set(value string) error { + v, err := strconv.Atoi(value) + if err != nil { + return err + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(Level(v), logging.vmodule.filter, false) + return nil +} + +// moduleSpec represents the setting of the -vmodule flag. +type moduleSpec struct { + filter []modulePat +} + +// modulePat contains a filter for the -vmodule flag. +// It holds a verbosity level and a file pattern to match. +type modulePat struct { + pattern string + literal bool // The pattern is a literal string + level Level +} + +// match reports whether the file matches the pattern. It uses a string +// comparison if the pattern contains no metacharacters. +func (m *modulePat) match(file string) bool { + if m.literal { + return file == m.pattern + } + match, _ := filepath.Match(m.pattern, file) + return match +} + +func (m *moduleSpec) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + var b bytes.Buffer + for i, f := range m.filter { + if i > 0 { + b.WriteRune(',') + } + fmt.Fprintf(&b, "%s=%d", f.pattern, f.level) + } + return b.String() +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported. +func (m *moduleSpec) Get() interface{} { + return nil +} + +var errVmoduleSyntax = errors.New("syntax error: expect comma-separated list of filename=N") + +// Syntax: -vmodule=recordio=2,file=1,gfs*=3 +func (m *moduleSpec) Set(value string) error { + var filter []modulePat + for _, pat := range strings.Split(value, ",") { + if len(pat) == 0 { + // Empty strings such as from a trailing comma can be ignored. + continue + } + patLev := strings.Split(pat, "=") + if len(patLev) != 2 || len(patLev[0]) == 0 || len(patLev[1]) == 0 { + return errVmoduleSyntax + } + pattern := patLev[0] + v, err := strconv.Atoi(patLev[1]) + if err != nil { + return errors.New("syntax error: expect comma-separated list of filename=N") + } + if v < 0 { + return errors.New("negative value for vmodule level") + } + if v == 0 { + continue // Ignore. It's harmless but no point in paying the overhead. + } + // TODO: check syntax of filter? + filter = append(filter, modulePat{pattern, isLiteral(pattern), Level(v)}) + } + logging.mu.Lock() + defer logging.mu.Unlock() + logging.setVState(logging.verbosity, filter, true) + return nil +} + +// isLiteral reports whether the pattern is a literal string, that is, has no metacharacters +// that require filepath.Match to be called to match the pattern. +func isLiteral(pattern string) bool { + return !strings.ContainsAny(pattern, `\*?[]`) +} + +// traceLocation represents the setting of the -log_backtrace_at flag. +type traceLocation struct { + file string + line int +} + +// isSet reports whether the trace location has been specified. 
+// logging.mu is held. +func (t *traceLocation) isSet() bool { + return t.line > 0 +} + +// match reports whether the specified file and line matches the trace location. +// The argument file name is the full path, not the basename specified in the flag. +// logging.mu is held. +func (t *traceLocation) match(file string, line int) bool { + if t.line != line { + return false + } + if i := strings.LastIndex(file, "/"); i >= 0 { + file = file[i+1:] + } + return t.file == file +} + +func (t *traceLocation) String() string { + // Lock because the type is not atomic. TODO: clean this up. + logging.mu.Lock() + defer logging.mu.Unlock() + return fmt.Sprintf("%s:%d", t.file, t.line) +} + +// Get is part of the (Go 1.2) flag.Getter interface. It always returns nil for this flag type since the +// struct is not exported +func (t *traceLocation) Get() interface{} { + return nil +} + +var errTraceSyntax = errors.New("syntax error: expect file.go:234") + +// Syntax: -log_backtrace_at=gopherflakes.go:234 +// Note that unlike vmodule the file extension is included here. +func (t *traceLocation) Set(value string) error { + if value == "" { + // Unset. + t.line = 0 + t.file = "" + } + fields := strings.Split(value, ":") + if len(fields) != 2 { + return errTraceSyntax + } + file, line := fields[0], fields[1] + if !strings.Contains(file, ".") { + return errTraceSyntax + } + v, err := strconv.Atoi(line) + if err != nil { + return errTraceSyntax + } + if v <= 0 { + return errors.New("negative or zero value for level") + } + logging.mu.Lock() + defer logging.mu.Unlock() + t.line = v + t.file = file + return nil +} + +// flushSyncWriter is the interface satisfied by logging destinations. +type flushSyncWriter interface { + Flush() error + Sync() error + io.Writer +} + +func init() { + flag.BoolVar(&logging.toStderr, "logtostderr", false, "log to standard error instead of files") + flag.BoolVar(&logging.alsoToStderr, "alsologtostderr", false, "log to standard error as well as files") + flag.Var(&logging.verbosity, "v", "log level for V logs") + flag.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr") + flag.Var(&logging.vmodule, "vmodule", "comma-separated list of pattern=N settings for file-filtered logging") + flag.Var(&logging.traceLocation, "log_backtrace_at", "when logging hits line file:N, emit a stack trace") + + // Default stderrThreshold is ERROR. + logging.stderrThreshold = errorLog + + logging.setVState(0, nil, false) + go logging.flushDaemon() +} + +// Flush flushes all pending log I/O. +func Flush() { + logging.lockAndFlushAll() +} + +// loggingT collects all the global state of the logging setup. +type loggingT struct { + // Boolean flags. Not handled atomically because the flag.Value interface + // does not let us avoid the =true, and that shorthand is necessary for + // compatibility. TODO: does this matter enough to fix? Seems unlikely. + toStderr bool // The -logtostderr flag. + alsoToStderr bool // The -alsologtostderr flag. + + // Level flag. Handled atomically. + stderrThreshold severity // The -stderrthreshold flag. + + // freeList is a list of byte buffers, maintained under freeListMu. + freeList *buffer + // freeListMu maintains the free list. It is separate from the main mutex + // so buffers can be grabbed and printed to without holding the main lock, + // for better parallelization. + freeListMu sync.Mutex + + // mu protects the remaining elements of this structure and is + // used to synchronize logging. 
+ mu sync.Mutex + // file holds writer for each of the log types. + file [numSeverity]flushSyncWriter + // pcs is used in V to avoid an allocation when computing the caller's PC. + pcs [1]uintptr + // vmap is a cache of the V Level for each V() call site, identified by PC. + // It is wiped whenever the vmodule flag changes state. + vmap map[uintptr]Level + // filterLength stores the length of the vmodule filter chain. If greater + // than zero, it means vmodule is enabled. It may be read safely + // using sync.LoadInt32, but is only modified under mu. + filterLength int32 + // traceLocation is the state of the -log_backtrace_at flag. + traceLocation traceLocation + // These flags are modified only under lock, although verbosity may be fetched + // safely using atomic.LoadInt32. + vmodule moduleSpec // The state of the -vmodule flag. + verbosity Level // V logging level, the value of the -v flag/ +} + +// buffer holds a byte Buffer for reuse. The zero value is ready for use. +type buffer struct { + bytes.Buffer + tmp [64]byte // temporary byte array for creating headers. + next *buffer +} + +var logging loggingT + +// setVState sets a consistent state for V logging. +// l.mu is held. +func (l *loggingT) setVState(verbosity Level, filter []modulePat, setFilter bool) { + // Turn verbosity off so V will not fire while we are in transition. + logging.verbosity.set(0) + // Ditto for filter length. + atomic.StoreInt32(&logging.filterLength, 0) + + // Set the new filters and wipe the pc->Level map if the filter has changed. + if setFilter { + logging.vmodule.filter = filter + logging.vmap = make(map[uintptr]Level) + } + + // Things are consistent now, so enable filtering and verbosity. + // They are enabled in order opposite to that in V. + atomic.StoreInt32(&logging.filterLength, int32(len(filter))) + logging.verbosity.set(verbosity) +} + +// getBuffer returns a new, ready-to-use buffer. +func (l *loggingT) getBuffer() *buffer { + l.freeListMu.Lock() + b := l.freeList + if b != nil { + l.freeList = b.next + } + l.freeListMu.Unlock() + if b == nil { + b = new(buffer) + } else { + b.next = nil + b.Reset() + } + return b +} + +// putBuffer returns a buffer to the free list. +func (l *loggingT) putBuffer(b *buffer) { + if b.Len() >= 256 { + // Let big buffers die a natural death. + return + } + l.freeListMu.Lock() + b.next = l.freeList + l.freeList = b + l.freeListMu.Unlock() +} + +var timeNow = time.Now // Stubbed out for testing. + +/* +header formats a log header as defined by the C++ implementation. +It returns a buffer containing the formatted header and the user's file and line number. +The depth specifies how many stack frames above lives the source line to be identified in the log message. + +Log lines have this form: + Lmmdd hh:mm:ss.uuuuuu threadid file:line] msg... +where the fields are defined as follows: + L A single character, representing the log level (eg 'I' for INFO) + mm The month (zero padded; ie May is '05') + dd The day (zero padded) + hh:mm:ss.uuuuuu Time in hours, minutes and fractional seconds + threadid The space-padded thread ID as returned by GetTID() + file The file name + line The line number + msg The user-supplied message +*/ +func (l *loggingT) header(s severity, depth int) (*buffer, string, int) { + _, file, line, ok := runtime.Caller(3 + depth) + if !ok { + file = "???" 
+ line = 1 + } else { + slash := strings.LastIndex(file, "/") + if slash >= 0 { + file = file[slash+1:] + } + } + return l.formatHeader(s, file, line), file, line +} + +// formatHeader formats a log header using the provided file name and line number. +func (l *loggingT) formatHeader(s severity, file string, line int) *buffer { + now := timeNow() + if line < 0 { + line = 0 // not a real line number, but acceptable to someDigits + } + if s > fatalLog { + s = infoLog // for safety. + } + buf := l.getBuffer() + + // Avoid Fprintf, for speed. The format is so simple that we can do it quickly by hand. + // It's worth about 3X. Fprintf is hard. + _, month, day := now.Date() + hour, minute, second := now.Clock() + // Lmmdd hh:mm:ss.uuuuuu threadid file:line] + buf.tmp[0] = severityChar[s] + buf.twoDigits(1, int(month)) + buf.twoDigits(3, day) + buf.tmp[5] = ' ' + buf.twoDigits(6, hour) + buf.tmp[8] = ':' + buf.twoDigits(9, minute) + buf.tmp[11] = ':' + buf.twoDigits(12, second) + buf.tmp[14] = '.' + buf.nDigits(6, 15, now.Nanosecond()/1000, '0') + buf.tmp[21] = ' ' + buf.nDigits(7, 22, pid, ' ') // TODO: should be TID + buf.tmp[29] = ' ' + buf.Write(buf.tmp[:30]) + buf.WriteString(file) + buf.tmp[0] = ':' + n := buf.someDigits(1, line) + buf.tmp[n+1] = ']' + buf.tmp[n+2] = ' ' + buf.Write(buf.tmp[:n+3]) + return buf +} + +// Some custom tiny helper functions to print the log header efficiently. + +const digits = "0123456789" + +// twoDigits formats a zero-prefixed two-digit integer at buf.tmp[i]. +func (buf *buffer) twoDigits(i, d int) { + buf.tmp[i+1] = digits[d%10] + d /= 10 + buf.tmp[i] = digits[d%10] +} + +// nDigits formats an n-digit integer at buf.tmp[i], +// padding with pad on the left. +// It assumes d >= 0. +func (buf *buffer) nDigits(n, i, d int, pad byte) { + j := n - 1 + for ; j >= 0 && d > 0; j-- { + buf.tmp[i+j] = digits[d%10] + d /= 10 + } + for ; j >= 0; j-- { + buf.tmp[i+j] = pad + } +} + +// someDigits formats a zero-prefixed variable-width integer at buf.tmp[i]. +func (buf *buffer) someDigits(i, d int) int { + // Print into the top, then copy down. We know there's space for at least + // a 10-digit number. + j := len(buf.tmp) + for { + j-- + buf.tmp[j] = digits[d%10] + d /= 10 + if d == 0 { + break + } + } + return copy(buf.tmp[i:], buf.tmp[j:]) +} + +func (l *loggingT) println(s severity, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintln(buf, args...) + l.output(s, buf, file, line, false) +} + +func (l *loggingT) print(s severity, args ...interface{}) { + l.printDepth(s, 1, args...) +} + +func (l *loggingT) printDepth(s severity, depth int, args ...interface{}) { + buf, file, line := l.header(s, depth) + fmt.Fprint(buf, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +func (l *loggingT) printf(s severity, format string, args ...interface{}) { + buf, file, line := l.header(s, 0) + fmt.Fprintf(buf, format, args...) + if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, false) +} + +// printWithFileLine behaves like print but uses the provided file and line number. If +// alsoLogToStderr is true, the log message always appears on standard error; it +// will also appear in the log file unless --logtostderr is set. +func (l *loggingT) printWithFileLine(s severity, file string, line int, alsoToStderr bool, args ...interface{}) { + buf := l.formatHeader(s, file, line) + fmt.Fprint(buf, args...) 
+ if buf.Bytes()[buf.Len()-1] != '\n' { + buf.WriteByte('\n') + } + l.output(s, buf, file, line, alsoToStderr) +} + +// output writes the data to the log files and releases the buffer. +func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoToStderr bool) { + l.mu.Lock() + if l.traceLocation.isSet() { + if l.traceLocation.match(file, line) { + buf.Write(stacks(false)) + } + } + data := buf.Bytes() + if !flag.Parsed() { + os.Stderr.Write([]byte("ERROR: logging before flag.Parse: ")) + os.Stderr.Write(data) + } else if l.toStderr { + os.Stderr.Write(data) + } else { + if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { + os.Stderr.Write(data) + } + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } + } + if s == fatalLog { + // If we got here via Exit rather than Fatal, print no stacks. + if atomic.LoadUint32(&fatalNoStacks) > 0 { + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(1) + } + // Dump all goroutine stacks before exiting. + // First, make sure we see the trace for the current goroutine on standard error. + // If -logtostderr has been specified, the loop below will do that anyway + // as the first stack in the full dump. + if !l.toStderr { + os.Stderr.Write(stacks(false)) + } + // Write the stack trace for all goroutines to the files. + trace := stacks(true) + logExitFunc = func(error) {} // If we get a write error, we'll still exit below. + for log := fatalLog; log >= infoLog; log-- { + if f := l.file[log]; f != nil { // Can be nil if -logtostderr is set. + f.Write(trace) + } + } + l.mu.Unlock() + timeoutFlush(10 * time.Second) + os.Exit(255) // C++ uses -1, which is silly because it's anded with 255 anyway. + } + l.putBuffer(buf) + l.mu.Unlock() + if stats := severityStats[s]; stats != nil { + atomic.AddInt64(&stats.lines, 1) + atomic.AddInt64(&stats.bytes, int64(len(data))) + } +} + +// timeoutFlush calls Flush and returns when it completes or after timeout +// elapses, whichever happens first. This is needed because the hooks invoked +// by Flush may deadlock when glog.Fatal is called from a hook that holds +// a lock. +func timeoutFlush(timeout time.Duration) { + done := make(chan bool, 1) + go func() { + Flush() // calls logging.lockAndFlushAll() + done <- true + }() + select { + case <-done: + case <-time.After(timeout): + fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + } +} + +// stacks is a wrapper for runtime.Stack that attempts to recover the data for all goroutines. +func stacks(all bool) []byte { + // We don't know how big the traces are, so grow a few times if they don't fit. Start large, though. + n := 10000 + if all { + n = 100000 + } + var trace []byte + for i := 0; i < 5; i++ { + trace = make([]byte, n) + nbytes := runtime.Stack(trace, all) + if nbytes < len(trace) { + return trace[:nbytes] + } + n *= 2 + } + return trace +} + +// logExitFunc provides a simple mechanism to override the default behavior +// of exiting on error. Used in testing and to guarantee we reach a required exit +// for fatal logs. Instead, exit could be a function rather than a method but that +// would make its use clumsier. 
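+//
+// A minimal sketch of how a test inside this package might stub it so that a
+// write failure is reported rather than terminating the test binary
+// (hypothetical usage, not part of the upstream file):
+//
+//	logExitFunc = func(err error) {
+//		fmt.Fprintln(os.Stderr, "glog would have exited:", err)
+//	}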
+var logExitFunc func(error) + +// exit is called if there is trouble creating or writing log files. +// It flushes the logs and exits the program; there's no point in hanging around. +// l.mu is held. +func (l *loggingT) exit(err error) { + fmt.Fprintf(os.Stderr, "log: exiting because of error: %s\n", err) + // If logExitFunc is set, we do that instead of exiting. + if logExitFunc != nil { + logExitFunc(err) + return + } + l.flushAll() + os.Exit(2) +} + +// syncBuffer joins a bufio.Writer to its underlying file, providing access to the +// file's Sync method and providing a wrapper for the Write method that provides log +// file rotation. There are conflicting methods, so the file cannot be embedded. +// l.mu is held for all its methods. +type syncBuffer struct { + logger *loggingT + *bufio.Writer + file *os.File + sev severity + nbytes uint64 // The number of bytes written to this file +} + +func (sb *syncBuffer) Sync() error { + return sb.file.Sync() +} + +func (sb *syncBuffer) Write(p []byte) (n int, err error) { + if sb.nbytes+uint64(len(p)) >= MaxSize { + if err := sb.rotateFile(time.Now()); err != nil { + sb.logger.exit(err) + } + } + n, err = sb.Writer.Write(p) + sb.nbytes += uint64(n) + if err != nil { + sb.logger.exit(err) + } + return +} + +// rotateFile closes the syncBuffer's file and starts a new one. +func (sb *syncBuffer) rotateFile(now time.Time) error { + if sb.file != nil { + sb.Flush() + sb.file.Close() + } + var err error + sb.file, _, err = create(severityName[sb.sev], now) + sb.nbytes = 0 + if err != nil { + return err + } + + sb.Writer = bufio.NewWriterSize(sb.file, bufferSize) + + // Write header. + var buf bytes.Buffer + fmt.Fprintf(&buf, "Log file created at: %s\n", now.Format("2006/01/02 15:04:05")) + fmt.Fprintf(&buf, "Running on machine: %s\n", host) + fmt.Fprintf(&buf, "Binary: Built with %s %s for %s/%s\n", runtime.Compiler, runtime.Version(), runtime.GOOS, runtime.GOARCH) + fmt.Fprintf(&buf, "Log line format: [IWEF]mmdd hh:mm:ss.uuuuuu threadid file:line] msg\n") + n, err := sb.file.Write(buf.Bytes()) + sb.nbytes += uint64(n) + return err +} + +// bufferSize sizes the buffer associated with each log file. It's large +// so that log records can accumulate without the logging thread blocking +// on disk I/O. The flushDaemon will block instead. +const bufferSize = 256 * 1024 + +// createFiles creates all the log files for severity from sev down to infoLog. +// l.mu is held. +func (l *loggingT) createFiles(sev severity) error { + now := time.Now() + // Files are created in decreasing severity order, so as soon as we find one + // has already been created, we can stop. + for s := sev; s >= infoLog && l.file[s] == nil; s-- { + sb := &syncBuffer{ + logger: l, + sev: s, + } + if err := sb.rotateFile(now); err != nil { + return err + } + l.file[s] = sb + } + return nil +} + +const flushInterval = 30 * time.Second + +// flushDaemon periodically flushes the log file buffers. +func (l *loggingT) flushDaemon() { + for _ = range time.NewTicker(flushInterval).C { + l.lockAndFlushAll() + } +} + +// lockAndFlushAll is like flushAll but locks l.mu first. +func (l *loggingT) lockAndFlushAll() { + l.mu.Lock() + l.flushAll() + l.mu.Unlock() +} + +// flushAll flushes all the logs and attempts to "sync" their data to disk. +// l.mu is held. +func (l *loggingT) flushAll() { + // Flush from fatal down, in case there's trouble flushing. 
+ for s := fatalLog; s >= infoLog; s-- { + file := l.file[s] + if file != nil { + file.Flush() // ignore error + file.Sync() // ignore error + } + } +} + +// CopyStandardLogTo arranges for messages written to the Go "log" package's +// default logs to also appear in the Google logs for the named and lower +// severities. Subsequent changes to the standard log's default output location +// or format may break this behavior. +// +// Valid names are "INFO", "WARNING", "ERROR", and "FATAL". If the name is not +// recognized, CopyStandardLogTo panics. +func CopyStandardLogTo(name string) { + sev, ok := severityByName(name) + if !ok { + panic(fmt.Sprintf("log.CopyStandardLogTo(%q): unrecognized severity name", name)) + } + // Set a log format that captures the user's file and line: + // d.go:23: message + stdLog.SetFlags(stdLog.Lshortfile) + stdLog.SetOutput(logBridge(sev)) +} + +// logBridge provides the Write method that enables CopyStandardLogTo to connect +// Go's standard logs to the logs provided by this package. +type logBridge severity + +// Write parses the standard logging line and passes its components to the +// logger for severity(lb). +func (lb logBridge) Write(b []byte) (n int, err error) { + var ( + file = "???" + line = 1 + text string + ) + // Split "d.go:23: message" into "d.go", "23", and "message". + if parts := bytes.SplitN(b, []byte{':'}, 3); len(parts) != 3 || len(parts[0]) < 1 || len(parts[2]) < 1 { + text = fmt.Sprintf("bad log format: %s", b) + } else { + file = string(parts[0]) + text = string(parts[2][1:]) // skip leading space + line, err = strconv.Atoi(string(parts[1])) + if err != nil { + text = fmt.Sprintf("bad line number: %s", b) + line = 1 + } + } + // printWithFileLine with alsoToStderr=true, so standard log messages + // always appear on standard error. + logging.printWithFileLine(severity(lb), file, line, true, text) + return len(b), nil +} + +// setV computes and remembers the V level for a given PC +// when vmodule is enabled. +// File pattern matching takes the basename of the file, stripped +// of its .go suffix, and uses filepath.Match, which is a little more +// general than the *? matching used in C++. +// l.mu is held. +func (l *loggingT) setV(pc uintptr) Level { + fn := runtime.FuncForPC(pc) + file, _ := fn.FileLine(pc) + // The file is something like /a/b/c/d.go. We want just the d. + if strings.HasSuffix(file, ".go") { + file = file[:len(file)-3] + } + if slash := strings.LastIndex(file, "/"); slash >= 0 { + file = file[slash+1:] + } + for _, filter := range l.vmodule.filter { + if filter.match(file) { + l.vmap[pc] = filter.level + return filter.level + } + } + l.vmap[pc] = 0 + return 0 +} + +// Verbose is a boolean type that implements Infof (like Printf) etc. +// See the documentation of V for more information. +type Verbose bool + +// V reports whether verbosity at the call site is at least the requested level. +// The returned value is a boolean of type Verbose, which implements Info, Infoln +// and Infof. These methods will write to the Info log if called. +// Thus, one may write either +// if glog.V(2) { glog.Info("log this") } +// or +// glog.V(2).Info("log this") +// The second form is shorter but the first is cheaper if logging is off because it does +// not evaluate its arguments. +// +// Whether an individual call to V generates a log record depends on the setting of +// the -v and --vmodule flags; both are off by default. 
If the level in the call to +// V is at least the value of -v, or of -vmodule for the source file containing the +// call, the V call will log. +func V(level Level) Verbose { + // This function tries hard to be cheap unless there's work to do. + // The fast path is two atomic loads and compares. + + // Here is a cheap but safe test to see if V logging is enabled globally. + if logging.verbosity.get() >= level { + return Verbose(true) + } + + // It's off globally but it vmodule may still be set. + // Here is another cheap but safe test to see if vmodule is enabled. + if atomic.LoadInt32(&logging.filterLength) > 0 { + // Now we need a proper lock to use the logging structure. The pcs field + // is shared so we must lock before accessing it. This is fairly expensive, + // but if V logging is enabled we're slow anyway. + logging.mu.Lock() + defer logging.mu.Unlock() + if runtime.Callers(2, logging.pcs[:]) == 0 { + return Verbose(false) + } + v, ok := logging.vmap[logging.pcs[0]] + if !ok { + v = logging.setV(logging.pcs[0]) + } + return Verbose(v >= level) + } + return Verbose(false) +} + +// Info is equivalent to the global Info function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Info(args ...interface{}) { + if v { + logging.print(infoLog, args...) + } +} + +// Infoln is equivalent to the global Infoln function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infoln(args ...interface{}) { + if v { + logging.println(infoLog, args...) + } +} + +// Infof is equivalent to the global Infof function, guarded by the value of v. +// See the documentation of V for usage. +func (v Verbose) Infof(format string, args ...interface{}) { + if v { + logging.printf(infoLog, format, args...) + } +} + +// Info logs to the INFO log. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Info(args ...interface{}) { + logging.print(infoLog, args...) +} + +// InfoDepth acts as Info but uses depth to determine which call frame to log. +// InfoDepth(0, "msg") is the same as Info("msg"). +func InfoDepth(depth int, args ...interface{}) { + logging.printDepth(infoLog, depth, args...) +} + +// Infoln logs to the INFO log. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Infoln(args ...interface{}) { + logging.println(infoLog, args...) +} + +// Infof logs to the INFO log. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Infof(format string, args ...interface{}) { + logging.printf(infoLog, format, args...) +} + +// Warning logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Warning(args ...interface{}) { + logging.print(warningLog, args...) +} + +// WarningDepth acts as Warning but uses depth to determine which call frame to log. +// WarningDepth(0, "msg") is the same as Warning("msg"). +func WarningDepth(depth int, args ...interface{}) { + logging.printDepth(warningLog, depth, args...) +} + +// Warningln logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Warningln(args ...interface{}) { + logging.println(warningLog, args...) +} + +// Warningf logs to the WARNING and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. 
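+//
+// A hypothetical call site (the variables are placeholders, not part of this
+// file) might read:
+//
+//	glog.Warningf("slow response from %s: took %v", addr, elapsed)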
+func Warningf(format string, args ...interface{}) { + logging.printf(warningLog, format, args...) +} + +// Error logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Error(args ...interface{}) { + logging.print(errorLog, args...) +} + +// ErrorDepth acts as Error but uses depth to determine which call frame to log. +// ErrorDepth(0, "msg") is the same as Error("msg"). +func ErrorDepth(depth int, args ...interface{}) { + logging.printDepth(errorLog, depth, args...) +} + +// Errorln logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Errorln(args ...interface{}) { + logging.println(errorLog, args...) +} + +// Errorf logs to the ERROR, WARNING, and INFO logs. +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Errorf(format string, args ...interface{}) { + logging.printf(errorLog, format, args...) +} + +// Fatal logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Fatal(args ...interface{}) { + logging.print(fatalLog, args...) +} + +// FatalDepth acts as Fatal but uses depth to determine which call frame to log. +// FatalDepth(0, "msg") is the same as Fatal("msg"). +func FatalDepth(depth int, args ...interface{}) { + logging.printDepth(fatalLog, depth, args...) +} + +// Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +func Fatalln(args ...interface{}) { + logging.println(fatalLog, args...) +} + +// Fatalf logs to the FATAL, ERROR, WARNING, and INFO logs, +// including a stack trace of all running goroutines, then calls os.Exit(255). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Fatalf(format string, args ...interface{}) { + logging.printf(fatalLog, format, args...) +} + +// fatalNoStacks is non-zero if we are to exit without dumping goroutine stacks. +// It allows Exit and relatives to use the Fatal logs. +var fatalNoStacks uint32 + +// Exit logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Print; a newline is appended if missing. +func Exit(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.print(fatalLog, args...) +} + +// ExitDepth acts as Exit but uses depth to determine which call frame to log. +// ExitDepth(0, "msg") is the same as Exit("msg"). +func ExitDepth(depth int, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printDepth(fatalLog, depth, args...) +} + +// Exitln logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +func Exitln(args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.println(fatalLog, args...) +} + +// Exitf logs to the FATAL, ERROR, WARNING, and INFO logs, then calls os.Exit(1). +// Arguments are handled in the manner of fmt.Printf; a newline is appended if missing. +func Exitf(format string, args ...interface{}) { + atomic.StoreUint32(&fatalNoStacks, 1) + logging.printf(fatalLog, format, args...) 
+} diff --git a/vendor/github.com/golang/glog/glog_file.go b/vendor/github.com/golang/glog/glog_file.go new file mode 100644 index 000000000000..65075d281110 --- /dev/null +++ b/vendor/github.com/golang/glog/glog_file.go @@ -0,0 +1,124 @@ +// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/ +// +// Copyright 2013 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// File I/O for logs. + +package glog + +import ( + "errors" + "flag" + "fmt" + "os" + "os/user" + "path/filepath" + "strings" + "sync" + "time" +) + +// MaxSize is the maximum size of a log file in bytes. +var MaxSize uint64 = 1024 * 1024 * 1800 + +// logDirs lists the candidate directories for new log files. +var logDirs []string + +// If non-empty, overrides the choice of directory in which to write logs. +// See createLogDirs for the full list of possible destinations. +var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory") + +func createLogDirs() { + if *logDir != "" { + logDirs = append(logDirs, *logDir) + } + logDirs = append(logDirs, os.TempDir()) +} + +var ( + pid = os.Getpid() + program = filepath.Base(os.Args[0]) + host = "unknownhost" + userName = "unknownuser" +) + +func init() { + h, err := os.Hostname() + if err == nil { + host = shortHostname(h) + } + + current, err := user.Current() + if err == nil { + userName = current.Username + } + + // Sanitize userName since it may contain filepath separators on Windows. + userName = strings.Replace(userName, `\`, "_", -1) +} + +// shortHostname returns its argument, truncating at the first period. +// For instance, given "www.google.com" it returns "www". +func shortHostname(hostname string) string { + if i := strings.Index(hostname, "."); i >= 0 { + return hostname[:i] + } + return hostname +} + +// logName returns a new log file name containing tag, with start time t, and +// the name for the symlink for tag. +func logName(tag string, t time.Time) (name, link string) { + name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d", + program, + host, + userName, + tag, + t.Year(), + t.Month(), + t.Day(), + t.Hour(), + t.Minute(), + t.Second(), + pid) + return name, program + "." + tag +} + +var onceLogDirs sync.Once + +// create creates a new log file and returns the file and its filename, which +// contains tag ("INFO", "FATAL", etc.) and t. If the file is created +// successfully, create also attempts to update the symlink for that tag, ignoring +// errors. 
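+//
+// As an illustration with hypothetical program, host, user, and time values,
+// logName above yields a file name of the form
+//
+//	myprog.host01.alice.log.INFO.20180102-150405.4321
+//
+// and a companion symlink named myprog.INFO pointing at it.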
+func create(tag string, t time.Time) (f *os.File, filename string, err error) { + onceLogDirs.Do(createLogDirs) + if len(logDirs) == 0 { + return nil, "", errors.New("log: no log dirs") + } + name, link := logName(tag, t) + var lastErr error + for _, dir := range logDirs { + fname := filepath.Join(dir, name) + f, err := os.Create(fname) + if err == nil { + symlink := filepath.Join(dir, link) + os.Remove(symlink) // ignore err + os.Symlink(name, symlink) // ignore err + return f, fname, nil + } + lastErr = err + } + return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr) +} diff --git a/vendor/github.com/google/btree/LICENSE b/vendor/github.com/google/btree/LICENSE new file mode 100644 index 000000000000..d64569567334 --- /dev/null +++ b/vendor/github.com/google/btree/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/google/btree/README.md b/vendor/github.com/google/btree/README.md new file mode 100644 index 000000000000..6062a4dacd49 --- /dev/null +++ b/vendor/github.com/google/btree/README.md @@ -0,0 +1,12 @@ +# BTree implementation for Go + +![Travis CI Build Status](https://api.travis-ci.org/google/btree.svg?branch=master) + +This package provides an in-memory B-Tree implementation for Go, useful as +an ordered, mutable data structure. + +The API is based off of the wonderful +http://godoc.org/github.com/petar/GoLLRB/llrb, and is meant to allow btree to +act as a drop-in replacement for gollrb trees. + +See http://godoc.org/github.com/google/btree for documentation. diff --git a/vendor/github.com/google/btree/btree.go b/vendor/github.com/google/btree/btree.go new file mode 100644 index 000000000000..6ff062f9bb4e --- /dev/null +++ b/vendor/github.com/google/btree/btree.go @@ -0,0 +1,890 @@ +// Copyright 2014 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package btree implements in-memory B-Trees of arbitrary degree. +// +// btree implements an in-memory B-Tree for use as an ordered data structure. +// It is not meant for persistent storage solutions. +// +// It has a flatter structure than an equivalent red-black or other binary tree, +// which in some cases yields better memory usage and/or performance. +// See some discussion on the matter here: +// http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html +// Note, though, that this project is in no way related to the C++ B-Tree +// implementation written about there. +// +// Within this tree, each node contains a slice of items and a (possibly nil) +// slice of children. 
For basic numeric values or raw structs, this can cause +// efficiency differences when compared to equivalent C++ template code that +// stores values in arrays within the node: +// * Due to the overhead of storing values as interfaces (each +// value needs to be stored as the value itself, then 2 words for the +// interface pointing to that value and its type), resulting in higher +// memory use. +// * Since interfaces can point to values anywhere in memory, values are +// most likely not stored in contiguous blocks, resulting in a higher +// number of cache misses. +// These issues don't tend to matter, though, when working with strings or other +// heap-allocated structures, since C++-equivalent structures also must store +// pointers and also distribute their values across the heap. +// +// This implementation is designed to be a drop-in replacement to gollrb.LLRB +// trees, (http://github.com/petar/gollrb), an excellent and probably the most +// widely used ordered tree implementation in the Go ecosystem currently. +// Its functions, therefore, exactly mirror those of +// llrb.LLRB where possible. Unlike gollrb, though, we currently don't +// support storing multiple equivalent values. +package btree + +import ( + "fmt" + "io" + "sort" + "strings" + "sync" +) + +// Item represents a single object in the tree. +type Item interface { + // Less tests whether the current item is less than the given argument. + // + // This must provide a strict weak ordering. + // If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only + // hold one of either a or b in the tree). + Less(than Item) bool +} + +const ( + DefaultFreeListSize = 32 +) + +var ( + nilItems = make(items, 16) + nilChildren = make(children, 16) +) + +// FreeList represents a free list of btree nodes. By default each +// BTree has its own FreeList, but multiple BTrees can share the same +// FreeList. +// Two Btrees using the same freelist are safe for concurrent write access. +type FreeList struct { + mu sync.Mutex + freelist []*node +} + +// NewFreeList creates a new free list. +// size is the maximum size of the returned free list. +func NewFreeList(size int) *FreeList { + return &FreeList{freelist: make([]*node, 0, size)} +} + +func (f *FreeList) newNode() (n *node) { + f.mu.Lock() + index := len(f.freelist) - 1 + if index < 0 { + f.mu.Unlock() + return new(node) + } + n = f.freelist[index] + f.freelist[index] = nil + f.freelist = f.freelist[:index] + f.mu.Unlock() + return +} + +// freeNode adds the given node to the list, returning true if it was added +// and false if it was discarded. +func (f *FreeList) freeNode(n *node) (out bool) { + f.mu.Lock() + if len(f.freelist) < cap(f.freelist) { + f.freelist = append(f.freelist, n) + out = true + } + f.mu.Unlock() + return +} + +// ItemIterator allows callers of Ascend* to iterate in-order over portions of +// the tree. When this function returns false, iteration will stop and the +// associated Ascend* function will immediately return. +type ItemIterator func(i Item) bool + +// New creates a new B-Tree with the given degree. +// +// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items +// and 2-4 children). +func New(degree int) *BTree { + return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize)) +} + +// NewWithFreeList creates a new B-Tree that uses the given node free list. 
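+//
+// A short sketch of two trees sharing one free list (hypothetical values; per
+// the FreeList documentation above, such trees remain safe for concurrent
+// write access):
+//
+//	fl := NewFreeList(DefaultFreeListSize)
+//	a := NewWithFreeList(8, fl)
+//	b := NewWithFreeList(8, fl)
+//	a.ReplaceOrInsert(Int(1))
+//	b.ReplaceOrInsert(Int(2))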
+func NewWithFreeList(degree int, f *FreeList) *BTree {
+	if degree <= 1 {
+		panic("bad degree")
+	}
+	return &BTree{
+		degree: degree,
+		cow:    &copyOnWriteContext{freelist: f},
+	}
+}
+
+// items stores items in a node.
+type items []Item
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *items) insertAt(index int, item Item) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = item
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *items) removeAt(index int) Item {
+	item := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return item
+}
+
+// pop removes and returns the last element in the list.
+func (s *items) pop() (out Item) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index items. index must be less than or equal to length.
+func (s *items) truncate(index int) {
+	var toClear items
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilItems):]
+	}
+}
+
+// find returns the index where the given item should be inserted into this
+// list. 'found' is true if the item already exists in the list at the given
+// index.
+func (s items) find(item Item) (index int, found bool) {
+	i := sort.Search(len(s), func(i int) bool {
+		return item.Less(s[i])
+	})
+	if i > 0 && !s[i-1].Less(item) {
+		return i - 1, true
+	}
+	return i, false
+}
+
+// children stores child nodes in a node.
+type children []*node
+
+// insertAt inserts a value into the given index, pushing all subsequent values
+// forward.
+func (s *children) insertAt(index int, n *node) {
+	*s = append(*s, nil)
+	if index < len(*s) {
+		copy((*s)[index+1:], (*s)[index:])
+	}
+	(*s)[index] = n
+}
+
+// removeAt removes a value at a given index, pulling all subsequent values
+// back.
+func (s *children) removeAt(index int) *node {
+	n := (*s)[index]
+	copy((*s)[index:], (*s)[index+1:])
+	(*s)[len(*s)-1] = nil
+	*s = (*s)[:len(*s)-1]
+	return n
+}
+
+// pop removes and returns the last element in the list.
+func (s *children) pop() (out *node) {
+	index := len(*s) - 1
+	out = (*s)[index]
+	(*s)[index] = nil
+	*s = (*s)[:index]
+	return
+}
+
+// truncate truncates this instance at index so that it contains only the
+// first index children. index must be less than or equal to length.
+func (s *children) truncate(index int) {
+	var toClear children
+	*s, toClear = (*s)[:index], (*s)[index:]
+	for len(toClear) > 0 {
+		toClear = toClear[copy(toClear, nilChildren):]
+	}
+}
+
+// node is an internal node in a tree.
+// +// It must at all times maintain the invariant that either +// * len(children) == 0, len(items) unconstrained +// * len(children) == len(items) + 1 +type node struct { + items items + children children + cow *copyOnWriteContext +} + +func (n *node) mutableFor(cow *copyOnWriteContext) *node { + if n.cow == cow { + return n + } + out := cow.newNode() + if cap(out.items) >= len(n.items) { + out.items = out.items[:len(n.items)] + } else { + out.items = make(items, len(n.items), cap(n.items)) + } + copy(out.items, n.items) + // Copy children + if cap(out.children) >= len(n.children) { + out.children = out.children[:len(n.children)] + } else { + out.children = make(children, len(n.children), cap(n.children)) + } + copy(out.children, n.children) + return out +} + +func (n *node) mutableChild(i int) *node { + c := n.children[i].mutableFor(n.cow) + n.children[i] = c + return c +} + +// split splits the given node at the given index. The current node shrinks, +// and this function returns the item that existed at that index and a new node +// containing all items/children after it. +func (n *node) split(i int) (Item, *node) { + item := n.items[i] + next := n.cow.newNode() + next.items = append(next.items, n.items[i+1:]...) + n.items.truncate(i) + if len(n.children) > 0 { + next.children = append(next.children, n.children[i+1:]...) + n.children.truncate(i + 1) + } + return item, next +} + +// maybeSplitChild checks if a child should be split, and if so splits it. +// Returns whether or not a split occurred. +func (n *node) maybeSplitChild(i, maxItems int) bool { + if len(n.children[i].items) < maxItems { + return false + } + first := n.mutableChild(i) + item, second := first.split(maxItems / 2) + n.items.insertAt(i, item) + n.children.insertAt(i+1, second) + return true +} + +// insert inserts an item into the subtree rooted at this node, making sure +// no nodes in the subtree exceed maxItems items. Should an equivalent item be +// be found/replaced by insert, it will be returned. +func (n *node) insert(item Item, maxItems int) Item { + i, found := n.items.find(item) + if found { + out := n.items[i] + n.items[i] = item + return out + } + if len(n.children) == 0 { + n.items.insertAt(i, item) + return nil + } + if n.maybeSplitChild(i, maxItems) { + inTree := n.items[i] + switch { + case item.Less(inTree): + // no change, we want first split node + case inTree.Less(item): + i++ // we want second split node + default: + out := n.items[i] + n.items[i] = item + return out + } + } + return n.mutableChild(i).insert(item, maxItems) +} + +// get finds the given key in the subtree and returns it. +func (n *node) get(key Item) Item { + i, found := n.items.find(key) + if found { + return n.items[i] + } else if len(n.children) > 0 { + return n.children[i].get(key) + } + return nil +} + +// min returns the first item in the subtree. +func min(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[0] + } + if len(n.items) == 0 { + return nil + } + return n.items[0] +} + +// max returns the last item in the subtree. +func max(n *node) Item { + if n == nil { + return nil + } + for len(n.children) > 0 { + n = n.children[len(n.children)-1] + } + if len(n.items) == 0 { + return nil + } + return n.items[len(n.items)-1] +} + +// toRemove details what item to remove in a node.remove call. 
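+//
+// The three modes correspond to the exported Delete, DeleteMin, and DeleteMax
+// methods; a hypothetical caller-side sketch:
+//
+//	tr := New(2)
+//	tr.ReplaceOrInsert(Int(1))
+//	tr.ReplaceOrInsert(Int(2))
+//	tr.DeleteMin() // removes and returns Int(1)
+//	tr.DeleteMax() // removes and returns Int(2)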
+type toRemove int + +const ( + removeItem toRemove = iota // removes the given item + removeMin // removes smallest item in the subtree + removeMax // removes largest item in the subtree +) + +// remove removes an item from the subtree rooted at this node. +func (n *node) remove(item Item, minItems int, typ toRemove) Item { + var i int + var found bool + switch typ { + case removeMax: + if len(n.children) == 0 { + return n.items.pop() + } + i = len(n.items) + case removeMin: + if len(n.children) == 0 { + return n.items.removeAt(0) + } + i = 0 + case removeItem: + i, found = n.items.find(item) + if len(n.children) == 0 { + if found { + return n.items.removeAt(i) + } + return nil + } + default: + panic("invalid type") + } + // If we get to here, we have children. + if len(n.children[i].items) <= minItems { + return n.growChildAndRemove(i, item, minItems, typ) + } + child := n.mutableChild(i) + // Either we had enough items to begin with, or we've done some + // merging/stealing, because we've got enough now and we're ready to return + // stuff. + if found { + // The item exists at index 'i', and the child we've selected can give us a + // predecessor, since if we've gotten here it's got > minItems items in it. + out := n.items[i] + // We use our special-case 'remove' call with typ=maxItem to pull the + // predecessor of item i (the rightmost leaf of our immediate left child) + // and set it into where we pulled the item from. + n.items[i] = child.remove(nil, minItems, removeMax) + return out + } + // Final recursive call. Once we're here, we know that the item isn't in this + // node and that the child is big enough to remove from. + return child.remove(item, minItems, typ) +} + +// growChildAndRemove grows child 'i' to make sure it's possible to remove an +// item from it while keeping it at minItems, then calls remove to actually +// remove it. +// +// Most documentation says we have to do two sets of special casing: +// 1) item is in this node +// 2) item is in child +// In both cases, we need to handle the two subcases: +// A) node has enough values that it can spare one +// B) node doesn't have enough values +// For the latter, we have to check: +// a) left sibling has node to spare +// b) right sibling has node to spare +// c) we must merge +// To simplify our code here, we handle cases #1 and #2 the same: +// If a node doesn't have enough items, we make sure it does (using a,b,c). +// We then simply redo our remove call, and the second time (regardless of +// whether we're in case 1 or 2), we'll have enough items and can guarantee +// that we hit case A. 
+func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item { + if i > 0 && len(n.children[i-1].items) > minItems { + // Steal from left child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i - 1) + stolenItem := stealFrom.items.pop() + child.items.insertAt(0, n.items[i-1]) + n.items[i-1] = stolenItem + if len(stealFrom.children) > 0 { + child.children.insertAt(0, stealFrom.children.pop()) + } + } else if i < len(n.items) && len(n.children[i+1].items) > minItems { + // steal from right child + child := n.mutableChild(i) + stealFrom := n.mutableChild(i + 1) + stolenItem := stealFrom.items.removeAt(0) + child.items = append(child.items, n.items[i]) + n.items[i] = stolenItem + if len(stealFrom.children) > 0 { + child.children = append(child.children, stealFrom.children.removeAt(0)) + } + } else { + if i >= len(n.items) { + i-- + } + child := n.mutableChild(i) + // merge with right child + mergeItem := n.items.removeAt(i) + mergeChild := n.children.removeAt(i + 1) + child.items = append(child.items, mergeItem) + child.items = append(child.items, mergeChild.items...) + child.children = append(child.children, mergeChild.children...) + n.cow.freeNode(mergeChild) + } + return n.remove(item, minItems, typ) +} + +type direction int + +const ( + descend = direction(-1) + ascend = direction(+1) +) + +// iterate provides a simple method for iterating over elements in the tree. +// +// When ascending, the 'start' should be less than 'stop' and when descending, +// the 'start' should be greater than 'stop'. Setting 'includeStart' to true +// will force the iterator to include the first item when it equals 'start', +// thus creating a "greaterOrEqual" or "lessThanEqual" rather than just a +// "greaterThan" or "lessThan" queries. +func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) { + var ok, found bool + var index int + switch dir { + case ascend: + if start != nil { + index, _ = n.items.find(start) + } + for i := index; i < len(n.items); i++ { + if len(n.children) > 0 { + if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if !includeStart && !hit && start != nil && !start.Less(n.items[i]) { + hit = true + continue + } + hit = true + if stop != nil && !n.items[i].Less(stop) { + return hit, false + } + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + case descend: + if start != nil { + index, found = n.items.find(start) + if !found { + index = index - 1 + } + } else { + index = len(n.items) - 1 + } + for i := index; i >= 0; i-- { + if start != nil && !n.items[i].Less(start) { + if !includeStart || hit || start.Less(n.items[i]) { + continue + } + } + if len(n.children) > 0 { + if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + if stop != nil && !stop.Less(n.items[i]) { + return hit, false // continue + } + hit = true + if !iter(n.items[i]) { + return hit, false + } + } + if len(n.children) > 0 { + if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok { + return hit, false + } + } + } + return hit, true +} + +// Used for testing/debugging purposes. 
+func (n *node) print(w io.Writer, level int) { + fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items) + for _, c := range n.children { + c.print(w, level+1) + } +} + +// BTree is an implementation of a B-Tree. +// +// BTree stores Item instances in an ordered structure, allowing easy insertion, +// removal, and iteration. +// +// Write operations are not safe for concurrent mutation by multiple +// goroutines, but Read operations are. +type BTree struct { + degree int + length int + root *node + cow *copyOnWriteContext +} + +// copyOnWriteContext pointers determine node ownership... a tree with a write +// context equivalent to a node's write context is allowed to modify that node. +// A tree whose write context does not match a node's is not allowed to modify +// it, and must create a new, writable copy (IE: it's a Clone). +// +// When doing any write operation, we maintain the invariant that the current +// node's context is equal to the context of the tree that requested the write. +// We do this by, before we descend into any node, creating a copy with the +// correct context if the contexts don't match. +// +// Since the node we're currently visiting on any write has the requesting +// tree's context, that node is modifiable in place. Children of that node may +// not share context, but before we descend into them, we'll make a mutable +// copy. +type copyOnWriteContext struct { + freelist *FreeList +} + +// Clone clones the btree, lazily. Clone should not be called concurrently, +// but the original tree (t) and the new tree (t2) can be used concurrently +// once the Clone call completes. +// +// The internal tree structure of b is marked read-only and shared between t and +// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes +// whenever one of b's original nodes would have been modified. Read operations +// should have no performance degredation. Write operations for both t and t2 +// will initially experience minor slow-downs caused by additional allocs and +// copies due to the aforementioned copy-on-write logic, but should converge to +// the original performance characteristics of the original tree. +func (t *BTree) Clone() (t2 *BTree) { + // Create two entirely new copy-on-write contexts. + // This operation effectively creates three trees: + // the original, shared nodes (old b.cow) + // the new b.cow nodes + // the new out.cow nodes + cow1, cow2 := *t.cow, *t.cow + out := *t + t.cow = &cow1 + out.cow = &cow2 + return &out +} + +// maxItems returns the max number of items to allow per node. +func (t *BTree) maxItems() int { + return t.degree*2 - 1 +} + +// minItems returns the min number of items to allow per node (ignored for the +// root node). +func (t *BTree) minItems() int { + return t.degree - 1 +} + +func (c *copyOnWriteContext) newNode() (n *node) { + n = c.freelist.newNode() + n.cow = c + return +} + +type freeType int + +const ( + ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist) + ftStored // node was stored in the freelist for later use + ftNotOwned // node was ignored by COW, since it's owned by another one +) + +// freeNode frees a node within a given COW context, if it's owned by that +// context. It returns what happened to the node (see freeType const +// documentation). 
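+//
+// A hypothetical illustration of the ownership rule:
+//
+//	t2 := t.Clone()
+//	// Nodes allocated before the clone still point at the original context,
+//	// so freeing one of them through either t.cow or t2.cow returns
+//	// ftNotOwned and the node is simply left for the garbage collector.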
+func (c *copyOnWriteContext) freeNode(n *node) freeType { + if n.cow == c { + // clear to allow GC + n.items.truncate(0) + n.children.truncate(0) + n.cow = nil + if c.freelist.freeNode(n) { + return ftStored + } else { + return ftFreelistFull + } + } else { + return ftNotOwned + } +} + +// ReplaceOrInsert adds the given item to the tree. If an item in the tree +// already equals the given one, it is removed from the tree and returned. +// Otherwise, nil is returned. +// +// nil cannot be added to the tree (will panic). +func (t *BTree) ReplaceOrInsert(item Item) Item { + if item == nil { + panic("nil item being added to BTree") + } + if t.root == nil { + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item) + t.length++ + return nil + } else { + t.root = t.root.mutableFor(t.cow) + if len(t.root.items) >= t.maxItems() { + item2, second := t.root.split(t.maxItems() / 2) + oldroot := t.root + t.root = t.cow.newNode() + t.root.items = append(t.root.items, item2) + t.root.children = append(t.root.children, oldroot, second) + } + } + out := t.root.insert(item, t.maxItems()) + if out == nil { + t.length++ + } + return out +} + +// Delete removes an item equal to the passed in item from the tree, returning +// it. If no such item exists, returns nil. +func (t *BTree) Delete(item Item) Item { + return t.deleteItem(item, removeItem) +} + +// DeleteMin removes the smallest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMin() Item { + return t.deleteItem(nil, removeMin) +} + +// DeleteMax removes the largest item in the tree and returns it. +// If no such item exists, returns nil. +func (t *BTree) DeleteMax() Item { + return t.deleteItem(nil, removeMax) +} + +func (t *BTree) deleteItem(item Item, typ toRemove) Item { + if t.root == nil || len(t.root.items) == 0 { + return nil + } + t.root = t.root.mutableFor(t.cow) + out := t.root.remove(item, t.minItems(), typ) + if len(t.root.items) == 0 && len(t.root.children) > 0 { + oldroot := t.root + t.root = t.root.children[0] + t.cow.freeNode(oldroot) + } + if out != nil { + t.length-- + } + return out +} + +// AscendRange calls the iterator for every value in the tree within the range +// [greaterOrEqual, lessThan), until iterator returns false. +func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator) +} + +// AscendLessThan calls the iterator for every value in the tree within the range +// [first, pivot), until iterator returns false. +func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, pivot, false, false, iterator) +} + +// AscendGreaterOrEqual calls the iterator for every value in the tree within +// the range [pivot, last], until iterator returns false. +func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, pivot, nil, true, false, iterator) +} + +// Ascend calls the iterator for every value in the tree within the range +// [first, last], until iterator returns false. +func (t *BTree) Ascend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(ascend, nil, nil, false, false, iterator) +} + +// DescendRange calls the iterator for every value in the tree within the range +// [lessOrEqual, greaterThan), until iterator returns false. 
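+//
+// For example, with a hypothetical tree tr holding Int keys 1 through 10:
+//
+//	tr.DescendRange(Int(8), Int(3), func(i Item) bool {
+//		fmt.Println(i) // visits 8, 7, 6, 5, 4
+//		return true
+//	})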
+func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator) +} + +// DescendLessOrEqual calls the iterator for every value in the tree within the range +// [pivot, first], until iterator returns false. +func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, pivot, nil, true, false, iterator) +} + +// DescendGreaterThan calls the iterator for every value in the tree within +// the range (pivot, last], until iterator returns false. +func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, pivot, false, false, iterator) +} + +// Descend calls the iterator for every value in the tree within the range +// [last, first], until iterator returns false. +func (t *BTree) Descend(iterator ItemIterator) { + if t.root == nil { + return + } + t.root.iterate(descend, nil, nil, false, false, iterator) +} + +// Get looks for the key item in the tree, returning it. It returns nil if +// unable to find that item. +func (t *BTree) Get(key Item) Item { + if t.root == nil { + return nil + } + return t.root.get(key) +} + +// Min returns the smallest item in the tree, or nil if the tree is empty. +func (t *BTree) Min() Item { + return min(t.root) +} + +// Max returns the largest item in the tree, or nil if the tree is empty. +func (t *BTree) Max() Item { + return max(t.root) +} + +// Has returns true if the given key is in the tree. +func (t *BTree) Has(key Item) bool { + return t.Get(key) != nil +} + +// Len returns the number of items currently in the tree. +func (t *BTree) Len() int { + return t.length +} + +// Clear removes all items from the btree. If addNodesToFreelist is true, +// t's nodes are added to its freelist as part of this call, until the freelist +// is full. Otherwise, the root node is simply dereferenced and the subtree +// left to Go's normal GC processes. +// +// This can be much faster +// than calling Delete on all elements, because that requires finding/removing +// each element in the tree and updating the tree accordingly. It also is +// somewhat faster than creating a new tree to replace the old one, because +// nodes from the old tree are reclaimed into the freelist for use by the new +// one, instead of being lost to the garbage collector. +// +// This call takes: +// O(1): when addNodesToFreelist is false, this is a single operation. +// O(1): when the freelist is already full, it breaks out immediately +// O(freelist size): when the freelist is empty and the nodes are all owned +// by this tree, nodes are added to the freelist until full. +// O(tree size): when all nodes are owned by another tree, all nodes are +// iterated over looking for nodes to add to the freelist, and due to +// ownership, none are. +func (t *BTree) Clear(addNodesToFreelist bool) { + if t.root != nil && addNodesToFreelist { + t.root.reset(t.cow) + } + t.root, t.length = nil, 0 +} + +// reset returns a subtree to the freelist. It breaks out immediately if the +// freelist is full, since the only benefit of iterating is to fill that +// freelist up. Returns true if parent reset call should continue. 
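// Illustrative usage sketch (editor's note, not part of the vendored file): the
// public B-Tree API above is typically driven from client code roughly as follows.
// It assumes the package-level New(degree) constructor defined elsewhere in this
// file and the Int item type defined below.
//
//	tr := btree.New(2)
//	for i := 0; i < 10; i++ {
//		tr.ReplaceOrInsert(btree.Int(i))
//	}
//	snap := tr.Clone()                // lazy clone; nodes are shared copy-on-write
//	tr.ReplaceOrInsert(btree.Int(42)) // mutates tr only; shared nodes are copied on demand
//	_ = snap.Has(btree.Int(42))       // false: the clone is unaffected
//	tr.AscendGreaterOrEqual(btree.Int(5), func(i btree.Item) bool {
//		fmt.Println(i) // visits 5 through 9, then 42, in ascending order
//		return true
//	})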
+func (n *node) reset(c *copyOnWriteContext) bool { + for _, child := range n.children { + if !child.reset(c) { + return false + } + } + return c.freeNode(n) != ftFreelistFull +} + +// Int implements the Item interface for integers. +type Int int + +// Less returns true if int(a) < int(b). +func (a Int) Less(b Item) bool { + return a < b.(Int) +} diff --git a/vendor/github.com/googleapis/gnostic/LICENSE b/vendor/github.com/googleapis/gnostic/LICENSE new file mode 100644 index 000000000000..6b0b1270ff0c --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/LICENSE @@ -0,0 +1,203 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go new file mode 100644 index 000000000000..4fd44c45e228 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.go @@ -0,0 +1,8847 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +package openapi_v2 + +import ( + "fmt" + "github.com/googleapis/gnostic/compiler" + "gopkg.in/yaml.v2" + "regexp" + "strings" +) + +// Version returns the package name (and OpenAPI version). +func Version() string { + return "openapi_v2" +} + +// NewAdditionalPropertiesItem creates an object of type AdditionalPropertiesItem if possible, returning an error if not. +func NewAdditionalPropertiesItem(in interface{}, context *compiler.Context) (*AdditionalPropertiesItem, error) { + errors := make([]error, 0) + x := &AdditionalPropertiesItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &AdditionalPropertiesItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // bool boolean = 2; + boolValue, ok := in.(bool) + if ok { + x.Oneof = &AdditionalPropertiesItem_Boolean{Boolean: boolValue} + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewAny creates an object of type Any if possible, returning an error if not. 
+func NewAny(in interface{}, context *compiler.Context) (*Any, error) { + errors := make([]error, 0) + x := &Any{} + bytes, _ := yaml.Marshal(in) + x.Yaml = string(bytes) + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewApiKeySecurity creates an object of type ApiKeySecurity if possible, returning an error if not. +func NewApiKeySecurity(in interface{}, context *compiler.Context) (*ApiKeySecurity, error) { + errors := make([]error, 0) + x := &ApiKeySecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [apiKey] + if ok && !compiler.StringArrayContainsValue([]string{"apiKey"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header query] + if ok && !compiler.StringArrayContainsValue([]string{"header", "query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value 
= resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBasicAuthenticationSecurity creates an object of type BasicAuthenticationSecurity if possible, returning an error if not. +func NewBasicAuthenticationSecurity(in interface{}, context *compiler.Context) (*BasicAuthenticationSecurity, error) { + errors := make([]error, 0) + x := &BasicAuthenticationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [basic] + if ok && !compiler.StringArrayContainsValue([]string{"basic"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewBodyParameter creates an object of type BodyParameter if possible, returning an error if not. 
+func NewBodyParameter(in interface{}, context *compiler.Context) (*BodyParameter, error) { + errors := make([]error, 0) + x := &BodyParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "schema"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "in", "name", "required", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 2; + v2 := compiler.MapValueForKey(m, "name") + if v2 != nil { + x.Name, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 3; + v3 := compiler.MapValueForKey(m, "in") + if v3 != nil { + x.In, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [body] + if ok && !compiler.StringArrayContainsValue([]string{"body"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool required = 4; + v4 := compiler.MapValueForKey(m, "required") + if v4 != nil { + x.Required, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema schema = 5; + v5 := compiler.MapValueForKey(m, "schema") + if v5 != nil { + var err error + x.Schema, err = NewSchema(v5, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewContact creates an object of type Contact if 
possible, returning an error if not. +func NewContact(in interface{}, context *compiler.Context) (*Contact, error) { + errors := make([]error, 0) + x := &Contact{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"email", "name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string email = 3; + v3 := compiler.MapValueForKey(m, "email") + if v3 != nil { + x.Email, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for email: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefault creates an object of type Default if possible, returning an error if not. 
+func NewDefault(in interface{}, context *compiler.Context) (*Default, error) { + errors := make([]error, 0) + x := &Default{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDefinitions creates an object of type Definitions if possible, returning an error if not. +func NewDefinitions(in interface{}, context *compiler.Context) (*Definitions, error) { + errors := make([]error, 0) + x := &Definitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewDocument creates an object of type Document if possible, returning an error if not. 
+func NewDocument(in interface{}, context *compiler.Context) (*Document, error) { + errors := make([]error, 0) + x := &Document{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"info", "paths", "swagger"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"basePath", "consumes", "definitions", "externalDocs", "host", "info", "parameters", "paths", "produces", "responses", "schemes", "security", "securityDefinitions", "swagger", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string swagger = 1; + v1 := compiler.MapValueForKey(m, "swagger") + if v1 != nil { + x.Swagger, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [2.0] + if ok && !compiler.StringArrayContainsValue([]string{"2.0"}, x.Swagger) { + message := fmt.Sprintf("has unexpected value for swagger: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Info info = 2; + v2 := compiler.MapValueForKey(m, "info") + if v2 != nil { + var err error + x.Info, err = NewInfo(v2, compiler.NewContext("info", context)) + if err != nil { + errors = append(errors, err) + } + } + // string host = 3; + v3 := compiler.MapValueForKey(m, "host") + if v3 != nil { + x.Host, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for host: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string base_path = 4; + v4 := compiler.MapValueForKey(m, "basePath") + if v4 != nil { + x.BasePath, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for basePath: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string schemes = 5; + v5 := compiler.MapValueForKey(m, "schemes") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 6; + v6 := compiler.MapValueForKey(m, "consumes") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // 
repeated string produces = 7; + v7 := compiler.MapValueForKey(m, "produces") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Paths paths = 8; + v8 := compiler.MapValueForKey(m, "paths") + if v8 != nil { + var err error + x.Paths, err = NewPaths(v8, compiler.NewContext("paths", context)) + if err != nil { + errors = append(errors, err) + } + } + // Definitions definitions = 9; + v9 := compiler.MapValueForKey(m, "definitions") + if v9 != nil { + var err error + x.Definitions, err = NewDefinitions(v9, compiler.NewContext("definitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // ParameterDefinitions parameters = 10; + v10 := compiler.MapValueForKey(m, "parameters") + if v10 != nil { + var err error + x.Parameters, err = NewParameterDefinitions(v10, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + } + // ResponseDefinitions responses = 11; + v11 := compiler.MapValueForKey(m, "responses") + if v11 != nil { + var err error + x.Responses, err = NewResponseDefinitions(v11, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // SecurityDefinitions security_definitions = 13; + v13 := compiler.MapValueForKey(m, "securityDefinitions") + if v13 != nil { + var err error + x.SecurityDefinitions, err = NewSecurityDefinitions(v13, compiler.NewContext("securityDefinitions", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Tag tags = 14; + v14 := compiler.MapValueForKey(m, "tags") + if v14 != nil { + // repeated Tag + x.Tags = make([]*Tag, 0) + a, ok := v14.([]interface{}) + if ok { + for _, item := range a { + y, err := NewTag(item, compiler.NewContext("tags", context)) + if err != nil { + errors = append(errors, err) + } + x.Tags = append(x.Tags, y) + } + } + } + // ExternalDocs external_docs = 15; + v15 := compiler.MapValueForKey(m, "externalDocs") + if v15 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v15, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 16; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + 
return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExamples creates an object of type Examples if possible, returning an error if not. +func NewExamples(in interface{}, context *compiler.Context) (*Examples, error) { + errors := make([]error, 0) + x := &Examples{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewExternalDocs creates an object of type ExternalDocs if possible, returning an error if not. +func NewExternalDocs(in interface{}, context *compiler.Context) (*ExternalDocs, error) { + errors := make([]error, 0) + x := &ExternalDocs{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"url"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, 
compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFileSchema creates an object of type FileSchema if possible, returning an error if not. +func NewFileSchema(in interface{}, context *compiler.Context) (*FileSchema, error) { + errors := make([]error, 0) + x := &FileSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"default", "description", "example", "externalDocs", "format", "readOnly", "required", "title", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string format = 1; + v1 := compiler.MapValueForKey(m, "format") + if v1 != nil { + x.Format, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 2; + v2 := compiler.MapValueForKey(m, "title") + if v2 != nil { + x.Title, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 4; + v4 := compiler.MapValueForKey(m, "default") + if v4 != nil { + var err error + x.Default, err = NewAny(v4, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string required = 5; + v5 := compiler.MapValueForKey(m, "required") + if v5 != nil { + v, ok := v5.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [file] + if ok && !compiler.StringArrayContainsValue([]string{"file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 7; + v7 := compiler.MapValueForKey(m, "readOnly") + if v7 != nil { + x.ReadOnly, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has 
unexpected value for readOnly: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 8; + v8 := compiler.MapValueForKey(m, "externalDocs") + if v8 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v8, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 9; + v9 := compiler.MapValueForKey(m, "example") + if v9 != nil { + var err error + x.Example, err = NewAny(v9, compiler.NewContext("example", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewFormDataParameterSubSchema creates an object of type FormDataParameterSubSchema if possible, returning an error if not. +func NewFormDataParameterSubSchema(in interface{}, context *compiler.Context) (*FormDataParameterSubSchema, error) { + errors := make([]error, 0) + x := &FormDataParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [formData] + if ok && !compiler.StringArrayContainsValue([]string{"formData"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has 
unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array file] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array", "file"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = 
append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; + v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + 
x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeader creates an object of type Header if possible, returning an error if not. +func NewHeader(in interface{}, context *compiler.Context) (*Header, error) { + errors := make([]error, 0) + x := &Header{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 
!= nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = 
int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 18; + v18 := compiler.MapValueForKey(m, "description") + if v18 != nil { + x.Description, ok = v18.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 19; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaderParameterSubSchema creates an object of type HeaderParameterSubSchema if possible, returning an error if not. 
+func NewHeaderParameterSubSchema(in interface{}, context *compiler.Context) (*HeaderParameterSubSchema, error) { + errors := make([]error, 0) + x := &HeaderParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [header] + if ok && !compiler.StringArrayContainsValue([]string{"header"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, 
"collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + 
if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewHeaders creates an object of type Headers if possible, returning an error if not. 
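+// Editorial aside, not part of the generated upstream file: a minimal usage sketch,
+// assuming the caller has already unmarshaled a YAML mapping of header names to header
+// definitions into headersNode and holds a parent compiler context in parentContext
+// (both names are hypothetical):
+//
+//	headers, err := NewHeaders(headersNode, compiler.NewContext("headers", parentContext))
+//	if err != nil {
+//		// err is an error group describing every header that failed to parse.
+//	}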
+func NewHeaders(in interface{}, context *compiler.Context) (*Headers, error) { + errors := make([]error, 0) + x := &Headers{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedHeader additional_properties = 1; + // MAP: Header + x.AdditionalProperties = make([]*NamedHeader, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedHeader{} + pair.Name = k + var err error + pair.Value, err = NewHeader(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewInfo creates an object of type Info if possible, returning an error if not. +func NewInfo(in interface{}, context *compiler.Context) (*Info, error) { + errors := make([]error, 0) + x := &Info{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"title", "version"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"contact", "description", "license", "termsOfService", "title", "version"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string title = 1; + v1 := compiler.MapValueForKey(m, "title") + if v1 != nil { + x.Title, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string version = 2; + v2 := compiler.MapValueForKey(m, "version") + if v2 != nil { + x.Version, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for version: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string terms_of_service = 4; + v4 := compiler.MapValueForKey(m, "termsOfService") + if v4 != nil { + x.TermsOfService, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for termsOfService: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Contact contact = 5; + v5 := compiler.MapValueForKey(m, "contact") + if v5 != nil { + var err error + x.Contact, err = NewContact(v5, compiler.NewContext("contact", context)) + if err != nil { + errors = append(errors, err) + } + } + // License license = 6; + v6 := compiler.MapValueForKey(m, "license") + if v6 != nil { + var err error + x.License, err = NewLicense(v6, 
compiler.NewContext("license", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewItemsItem creates an object of type ItemsItem if possible, returning an error if not. +func NewItemsItem(in interface{}, context *compiler.Context) (*ItemsItem, error) { + errors := make([]error, 0) + x := &ItemsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value for item array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Schema = make([]*Schema, 0) + y, err := NewSchema(m, compiler.NewContext("", context)) + if err != nil { + return nil, err + } + x.Schema = append(x.Schema, y) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewJsonReference creates an object of type JsonReference if possible, returning an error if not. +func NewJsonReference(in interface{}, context *compiler.Context) (*JsonReference, error) { + errors := make([]error, 0) + x := &JsonReference{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"$ref"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"$ref", "description"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewLicense creates an object of type License if possible, returning an error if not. 
+func NewLicense(in interface{}, context *compiler.Context) (*License, error) { + errors := make([]error, 0) + x := &License{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"name", "url"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string url = 2; + v2 := compiler.MapValueForKey(m, "url") + if v2 != nil { + x.Url, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for url: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 3; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedAny creates an object of type NamedAny if possible, returning an error if not. 
+func NewNamedAny(in interface{}, context *compiler.Context) (*NamedAny, error) { + errors := make([]error, 0) + x := &NamedAny{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewAny(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedHeader creates an object of type NamedHeader if possible, returning an error if not. +func NewNamedHeader(in interface{}, context *compiler.Context) (*NamedHeader, error) { + errors := make([]error, 0) + x := &NamedHeader{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Header value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewHeader(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedParameter creates an object of type NamedParameter if possible, returning an error if not. 
+func NewNamedParameter(in interface{}, context *compiler.Context) (*NamedParameter, error) { + errors := make([]error, 0) + x := &NamedParameter{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Parameter value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewParameter(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedPathItem creates an object of type NamedPathItem if possible, returning an error if not. +func NewNamedPathItem(in interface{}, context *compiler.Context) (*NamedPathItem, error) { + errors := make([]error, 0) + x := &NamedPathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PathItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewPathItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponse creates an object of type NamedResponse if possible, returning an error if not. 
+func NewNamedResponse(in interface{}, context *compiler.Context) (*NamedResponse, error) { + errors := make([]error, 0) + x := &NamedResponse{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Response value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponse(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedResponseValue creates an object of type NamedResponseValue if possible, returning an error if not. +func NewNamedResponseValue(in interface{}, context *compiler.Context) (*NamedResponseValue, error) { + errors := make([]error, 0) + x := &NamedResponseValue{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ResponseValue value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewResponseValue(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSchema creates an object of type NamedSchema if possible, returning an error if not. 
+func NewNamedSchema(in interface{}, context *compiler.Context) (*NamedSchema, error) { + errors := make([]error, 0) + x := &NamedSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Schema value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSchema(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedSecurityDefinitionsItem creates an object of type NamedSecurityDefinitionsItem if possible, returning an error if not. +func NewNamedSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*NamedSecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &NamedSecurityDefinitionsItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SecurityDefinitionsItem value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewSecurityDefinitionsItem(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedString creates an object of type NamedString if possible, returning an error if not. 
+func NewNamedString(in interface{}, context *compiler.Context) (*NamedString, error) { + errors := make([]error, 0) + x := &NamedString{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + x.Value, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for value: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNamedStringArray creates an object of type NamedStringArray if possible, returning an error if not. +func NewNamedStringArray(in interface{}, context *compiler.Context) (*NamedStringArray, error) { + errors := make([]error, 0) + x := &NamedStringArray{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"name", "value"} + var allowedPatterns []*regexp.Regexp + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // StringArray value = 2; + v2 := compiler.MapValueForKey(m, "value") + if v2 != nil { + var err error + x.Value, err = NewStringArray(v2, compiler.NewContext("value", context)) + if err != nil { + errors = append(errors, err) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewNonBodyParameter creates an object of type NonBodyParameter if possible, returning an error if not. 
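+// Editorial aside, not part of the generated upstream file: NewNonBodyParameter tries each
+// sub-schema constructor in turn (header, formData, query, path) and keeps the first variant
+// that parses without error, discarding the candidates' errors once one matches. A minimal
+// sketch, assuming paramNode and parentContext are supplied by the caller (hypothetical names):
+//
+//	p, err := NewNonBodyParameter(paramNode, compiler.NewContext("parameter", parentContext))
+//	if err == nil {
+//		if h, ok := p.Oneof.(*NonBodyParameter_HeaderParameterSubSchema); ok {
+//			_ = h.HeaderParameterSubSchema.Name // e.g. inspect the matched header parameter
+//		}
+//	}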
+func NewNonBodyParameter(in interface{}, context *compiler.Context) (*NonBodyParameter, error) { + errors := make([]error, 0) + x := &NonBodyParameter{} + matched := false + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"in", "name", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // HeaderParameterSubSchema header_parameter_sub_schema = 1; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewHeaderParameterSubSchema(m, compiler.NewContext("headerParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_HeaderParameterSubSchema{HeaderParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFormDataParameterSubSchema(m, compiler.NewContext("formDataParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_FormDataParameterSubSchema{FormDataParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // QueryParameterSubSchema query_parameter_sub_schema = 3; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewQueryParameterSubSchema(m, compiler.NewContext("queryParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_QueryParameterSubSchema{QueryParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + // PathParameterSubSchema path_parameter_sub_schema = 4; + { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewPathParameterSubSchema(m, compiler.NewContext("pathParameterSubSchema", context)) + if matchingError == nil { + x.Oneof = &NonBodyParameter_PathParameterSubSchema{PathParameterSubSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2AccessCodeSecurity creates an object of type Oauth2AccessCodeSecurity if possible, returning an error if not. 
+func NewOauth2AccessCodeSecurity(in interface{}, context *compiler.Context) (*Oauth2AccessCodeSecurity, error) { + errors := make([]error, 0) + x := &Oauth2AccessCodeSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [accessCode] + if ok && !compiler.StringArrayContainsValue([]string{"accessCode"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string token_url = 5; + v5 := compiler.MapValueForKey(m, "tokenUrl") + if v5 != nil { + x.TokenUrl, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 6; + v6 := compiler.MapValueForKey(m, "description") + if v6 != nil { + x.Description, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 7; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := 
compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ApplicationSecurity creates an object of type Oauth2ApplicationSecurity if possible, returning an error if not. +func NewOauth2ApplicationSecurity(in interface{}, context *compiler.Context) (*Oauth2ApplicationSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ApplicationSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [application] + if ok && !compiler.StringArrayContainsValue([]string{"application"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string 
description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2ImplicitSecurity creates an object of type Oauth2ImplicitSecurity if possible, returning an error if not. +func NewOauth2ImplicitSecurity(in interface{}, context *compiler.Context) (*Oauth2ImplicitSecurity, error) { + errors := make([]error, 0) + x := &Oauth2ImplicitSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"authorizationUrl", "flow", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"authorizationUrl", "description", "flow", "scopes", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [implicit] + if ok && !compiler.StringArrayContainsValue([]string{"implicit"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = 
NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string authorization_url = 4; + v4 := compiler.MapValueForKey(m, "authorizationUrl") + if v4 != nil { + x.AuthorizationUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for authorizationUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2PasswordSecurity creates an object of type Oauth2PasswordSecurity if possible, returning an error if not. +func NewOauth2PasswordSecurity(in interface{}, context *compiler.Context) (*Oauth2PasswordSecurity, error) { + errors := make([]error, 0) + x := &Oauth2PasswordSecurity{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"flow", "tokenUrl", "type"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "flow", "scopes", "tokenUrl", "type"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [oauth2] + if ok && !compiler.StringArrayContainsValue([]string{"oauth2"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string flow = 2; + v2 := compiler.MapValueForKey(m, "flow") + if v2 != nil { + x.Flow, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [password] + if ok && !compiler.StringArrayContainsValue([]string{"password"}, x.Flow) { + message := fmt.Sprintf("has unexpected value for flow: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Oauth2Scopes scopes = 3; + v3 := compiler.MapValueForKey(m, "scopes") + if v3 != nil { + var err error + x.Scopes, err = NewOauth2Scopes(v3, compiler.NewContext("scopes", context)) + if err != nil { + errors = append(errors, err) + } + } + // string token_url = 4; + v4 := compiler.MapValueForKey(m, "tokenUrl") + if v4 != nil { + x.TokenUrl, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for tokenUrl: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 5; + v5 := compiler.MapValueForKey(m, "description") + if v5 != nil { + x.Description, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOauth2Scopes creates an object of type Oauth2Scopes if possible, returning an error if not. +func NewOauth2Scopes(in interface{}, context *compiler.Context) (*Oauth2Scopes, error) { + errors := make([]error, 0) + x := &Oauth2Scopes{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedString additional_properties = 1; + // MAP: string + x.AdditionalProperties = make([]*NamedString, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedString{} + pair.Name = k + pair.Value = v.(string) + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewOperation creates an object of type Operation if possible, returning an error if not. 
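+// Editorial aside, not part of the generated upstream file: a minimal call sketch, assuming
+// opNode holds the YAML mapping for a single HTTP method and parentContext is the enclosing
+// path-item context (both names are hypothetical):
+//
+//	op, err := NewOperation(opNode, compiler.NewContext("get", parentContext))
+//	// A non-nil err groups problems such as a missing required "responses" key,
+//	// keys outside the allowed set, or scheme values other than http, https, ws, wss.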
+func NewOperation(in interface{}, context *compiler.Context) (*Operation, error) { + errors := make([]error, 0) + x := &Operation{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"responses"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"consumes", "deprecated", "description", "externalDocs", "operationId", "parameters", "produces", "responses", "schemes", "security", "summary", "tags"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated string tags = 1; + v1 := compiler.MapValueForKey(m, "tags") + if v1 != nil { + v, ok := v1.([]interface{}) + if ok { + x.Tags = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for tags: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string summary = 2; + v2 := compiler.MapValueForKey(m, "summary") + if v2 != nil { + x.Summary, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for summary: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 4; + v4 := compiler.MapValueForKey(m, "externalDocs") + if v4 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v4, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // string operation_id = 5; + v5 := compiler.MapValueForKey(m, "operationId") + if v5 != nil { + x.OperationId, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for operationId: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string produces = 6; + v6 := compiler.MapValueForKey(m, "produces") + if v6 != nil { + v, ok := v6.([]interface{}) + if ok { + x.Produces = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for produces: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string consumes = 7; + v7 := compiler.MapValueForKey(m, "consumes") + if v7 != nil { + v, ok := v7.([]interface{}) + if ok { + x.Consumes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for consumes: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated ParametersItem parameters = 8; + v8 := compiler.MapValueForKey(m, "parameters") + if v8 != nil { + // repeated ParametersItem + x.Parameters = 
make([]*ParametersItem, 0) + a, ok := v8.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // Responses responses = 9; + v9 := compiler.MapValueForKey(m, "responses") + if v9 != nil { + var err error + x.Responses, err = NewResponses(v9, compiler.NewContext("responses", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated string schemes = 10; + v10 := compiler.MapValueForKey(m, "schemes") + if v10 != nil { + v, ok := v10.([]interface{}) + if ok { + x.Schemes = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for schemes: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [http https ws wss] + if ok && !compiler.StringArrayContainsValues([]string{"http", "https", "ws", "wss"}, x.Schemes) { + message := fmt.Sprintf("has unexpected value for schemes: %+v", v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool deprecated = 11; + v11 := compiler.MapValueForKey(m, "deprecated") + if v11 != nil { + x.Deprecated, ok = v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for deprecated: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated SecurityRequirement security = 12; + v12 := compiler.MapValueForKey(m, "security") + if v12 != nil { + // repeated SecurityRequirement + x.Security = make([]*SecurityRequirement, 0) + a, ok := v12.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSecurityRequirement(item, compiler.NewContext("security", context)) + if err != nil { + errors = append(errors, err) + } + x.Security = append(x.Security, y) + } + } + } + // repeated NamedAny vendor_extension = 13; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameter creates an object of type Parameter if possible, returning an error if not. 
+func NewParameter(in interface{}, context *compiler.Context) (*Parameter, error) { + errors := make([]error, 0) + x := &Parameter{} + matched := false + // BodyParameter body_parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBodyParameter(m, compiler.NewContext("bodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_BodyParameter{BodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // NonBodyParameter non_body_parameter = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewNonBodyParameter(m, compiler.NewContext("nonBodyParameter", context)) + if matchingError == nil { + x.Oneof = &Parameter_NonBodyParameter{NonBodyParameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParameterDefinitions creates an object of type ParameterDefinitions if possible, returning an error if not. +func NewParameterDefinitions(in interface{}, context *compiler.Context) (*ParameterDefinitions, error) { + errors := make([]error, 0) + x := &ParameterDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedParameter additional_properties = 1; + // MAP: Parameter + x.AdditionalProperties = make([]*NamedParameter, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedParameter{} + pair.Name = k + var err error + pair.Value, err = NewParameter(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewParametersItem creates an object of type ParametersItem if possible, returning an error if not. 
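The oneof constructors such as NewParameter above resolve a value by trying each candidate subtype constructor in turn, keeping the first one that parses without error and discarding the accumulated mismatch errors. The sketch below (again with assumed gnostic and yaml.v2 import paths, and a parameter shape that is assumed to satisfy the non-body subschema's required keys) shows how the populated branch of the oneof reveals which subtype matched.

package main

import (
	"fmt"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := `
name: q
in: query
type: string
description: free-text search
`
	var m yaml.MapSlice
	if err := yaml.Unmarshal([]byte(src), &m); err != nil {
		log.Fatal(err)
	}
	p, err := openapi_v2.NewParameter(m, compiler.NewContext("parameter", nil))
	if err != nil {
		log.Fatal(err)
	}
	// The populated branch of the oneof tells us which subtype constructor matched.
	switch p.Oneof.(type) {
	case *openapi_v2.Parameter_BodyParameter:
		fmt.Println("matched as a body parameter")
	case *openapi_v2.Parameter_NonBodyParameter:
		fmt.Println("matched as a non-body (query/header/path/formData) parameter")
	}
}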
+func NewParametersItem(in interface{}, context *compiler.Context) (*ParametersItem, error) { + errors := make([]error, 0) + x := &ParametersItem{} + matched := false + // Parameter parameter = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewParameter(m, compiler.NewContext("parameter", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_Parameter{Parameter: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ParametersItem_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathItem creates an object of type PathItem if possible, returning an error if not. +func NewPathItem(in interface{}, context *compiler.Context) (*PathItem, error) { + errors := make([]error, 0) + x := &PathItem{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "delete", "get", "head", "options", "parameters", "patch", "post", "put"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Operation get = 2; + v2 := compiler.MapValueForKey(m, "get") + if v2 != nil { + var err error + x.Get, err = NewOperation(v2, compiler.NewContext("get", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation put = 3; + v3 := compiler.MapValueForKey(m, "put") + if v3 != nil { + var err error + x.Put, err = NewOperation(v3, compiler.NewContext("put", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation post = 4; + v4 := compiler.MapValueForKey(m, "post") + if v4 != nil { + var err error + x.Post, err = NewOperation(v4, compiler.NewContext("post", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation delete = 5; + v5 := compiler.MapValueForKey(m, "delete") + if v5 != nil { + var err error + x.Delete, err = NewOperation(v5, compiler.NewContext("delete", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation options = 6; + v6 := compiler.MapValueForKey(m, "options") + if v6 != nil { + var err error + x.Options, err = NewOperation(v6, compiler.NewContext("options", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation head = 7; + v7 := compiler.MapValueForKey(m, "head") + if v7 != nil { + var err error 
+ x.Head, err = NewOperation(v7, compiler.NewContext("head", context)) + if err != nil { + errors = append(errors, err) + } + } + // Operation patch = 8; + v8 := compiler.MapValueForKey(m, "patch") + if v8 != nil { + var err error + x.Patch, err = NewOperation(v8, compiler.NewContext("patch", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated ParametersItem parameters = 9; + v9 := compiler.MapValueForKey(m, "parameters") + if v9 != nil { + // repeated ParametersItem + x.Parameters = make([]*ParametersItem, 0) + a, ok := v9.([]interface{}) + if ok { + for _, item := range a { + y, err := NewParametersItem(item, compiler.NewContext("parameters", context)) + if err != nil { + errors = append(errors, err) + } + x.Parameters = append(x.Parameters, y) + } + } + } + // repeated NamedAny vendor_extension = 10; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPathParameterSubSchema creates an object of type PathParameterSubSchema if possible, returning an error if not. +func NewPathParameterSubSchema(in interface{}, context *compiler.Context) (*PathParameterSubSchema, error) { + errors := make([]error, 0) + x := &PathParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"required"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, 
compiler.NewError(context, message)) + } + // check for valid enum values + // [path] + if ok && !compiler.StringArrayContainsValue([]string{"path"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 5; + v5 := compiler.MapValueForKey(m, "type") + if v5 != nil { + x.Type, ok = v5.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 6; + v6 := compiler.MapValueForKey(m, "format") + if v6 != nil { + x.Format, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 7; + v7 := compiler.MapValueForKey(m, "items") + if v7 != nil { + var err error + x.Items, err = NewPrimitivesItems(v7, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 8; + v8 := compiler.MapValueForKey(m, "collectionFormat") + if v8 != nil { + x.CollectionFormat, ok = v8.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 9; + v9 := compiler.MapValueForKey(m, "default") + if v9 != nil { + var err error + x.Default, err = NewAny(v9, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 10; + v10 := compiler.MapValueForKey(m, "maximum") + if v10 != nil { + switch v10 := v10.(type) { + case float64: + x.Maximum = v10 + case float32: + x.Maximum = float64(v10) + case uint64: + x.Maximum = float64(v10) + case uint32: + x.Maximum = float64(v10) + case int64: + x.Maximum = float64(v10) + case int32: + x.Maximum = float64(v10) + case int: + x.Maximum = float64(v10) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 11; + v11 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v11 != nil { + x.ExclusiveMaximum, ok = 
v11.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 12; + v12 := compiler.MapValueForKey(m, "minimum") + if v12 != nil { + switch v12 := v12.(type) { + case float64: + x.Minimum = v12 + case float32: + x.Minimum = float64(v12) + case uint64: + x.Minimum = float64(v12) + case uint32: + x.Minimum = float64(v12) + case int64: + x.Minimum = float64(v12) + case int32: + x.Minimum = float64(v12) + case int: + x.Minimum = float64(v12) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 13; + v13 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v13 != nil { + x.ExclusiveMinimum, ok = v13.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 14; + v14 := compiler.MapValueForKey(m, "maxLength") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 15; + v15 := compiler.MapValueForKey(m, "minLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 16; + v16 := compiler.MapValueForKey(m, "pattern") + if v16 != nil { + x.Pattern, ok = v16.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 17; + v17 := compiler.MapValueForKey(m, "maxItems") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 18; + v18 := compiler.MapValueForKey(m, "minItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 19; + v19 := compiler.MapValueForKey(m, "uniqueItems") + if v19 != nil { + x.UniqueItems, ok = v19.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 21; + v21 := compiler.MapValueForKey(m, "multipleOf") + if v21 != nil { + switch v21 := v21.(type) { + case float64: + x.MultipleOf = v21 + case float32: + x.MultipleOf = float64(v21) + case uint64: + x.MultipleOf = float64(v21) + case uint32: + x.MultipleOf = 
float64(v21) + case int64: + x.MultipleOf = float64(v21) + case int32: + x.MultipleOf = float64(v21) + case int: + x.MultipleOf = float64(v21) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v21, v21) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 22; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPaths creates an object of type Paths if possible, returning an error if not. +func NewPaths(in interface{}, context *compiler.Context) (*Paths, error) { + errors := make([]error, 0) + x := &Paths{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern0, pattern1} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedAny vendor_extension = 1; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + // repeated NamedPathItem path = 2; + // MAP: PathItem ^/ + x.Path = make([]*NamedPathItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "/") { + pair := &NamedPathItem{} + pair.Name = k + var err error + pair.Value, err = NewPathItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.Path = append(x.Path, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewPrimitivesItems creates an object of type PrimitivesItems if possible, returning an error if not. 
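NewPaths above routes map keys by pattern: keys beginning with "/" become NamedPathItem entries, "x-" keys are collected as vendor extensions, and any other key is reported as invalid. A small sketch of that routing, with assumed import paths, follows; the response code is quoted in the YAML so it stays a string key rather than decoding as an integer.

package main

import (
	"fmt"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := `
/pets:
  get:
    responses:
      "200":
        description: a list of pets
x-origin: hand-written example
`
	var m yaml.MapSlice
	if err := yaml.Unmarshal([]byte(src), &m); err != nil {
		log.Fatal(err)
	}
	paths, err := openapi_v2.NewPaths(m, compiler.NewContext("paths", nil))
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range paths.Path {
		fmt.Println("path:", p.Name) // path: /pets
	}
	for _, ext := range paths.VendorExtension {
		fmt.Println("extension:", ext.Name) // extension: x-origin
	}
}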
+func NewPrimitivesItems(in interface{}, context *compiler.Context) (*PrimitivesItems, error) { + errors := make([]error, 0) + x := &PrimitivesItems{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"collectionFormat", "default", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "pattern", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string type = 1; + v1 := compiler.MapValueForKey(m, "type") + if v1 != nil { + x.Type, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number integer boolean array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "integer", "boolean", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // PrimitivesItems items = 3; + v3 := compiler.MapValueForKey(m, "items") + if v3 != nil { + var err error + x.Items, err = NewPrimitivesItems(v3, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 4; + v4 := compiler.MapValueForKey(m, "collectionFormat") + if v4 != nil { + x.CollectionFormat, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 6; + v6 := compiler.MapValueForKey(m, "maximum") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.Maximum = v6 + case float32: + x.Maximum = float64(v6) + case uint64: + x.Maximum = float64(v6) + case uint32: + x.Maximum = float64(v6) + case int64: + x.Maximum = float64(v6) + case int32: + x.Maximum = float64(v6) + case int: + x.Maximum = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 7; + v7 := compiler.MapValueForKey(m, "exclusiveMaximum") + 
if v7 != nil { + x.ExclusiveMaximum, ok = v7.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 8; + v8 := compiler.MapValueForKey(m, "minimum") + if v8 != nil { + switch v8 := v8.(type) { + case float64: + x.Minimum = v8 + case float32: + x.Minimum = float64(v8) + case uint64: + x.Minimum = float64(v8) + case uint32: + x.Minimum = float64(v8) + case int64: + x.Minimum = float64(v8) + case int32: + x.Minimum = float64(v8) + case int: + x.Minimum = float64(v8) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 9; + v9 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v9 != nil { + x.ExclusiveMinimum, ok = v9.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 10; + v10 := compiler.MapValueForKey(m, "maxLength") + if v10 != nil { + t, ok := v10.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 11; + v11 := compiler.MapValueForKey(m, "minLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 12; + v12 := compiler.MapValueForKey(m, "pattern") + if v12 != nil { + x.Pattern, ok = v12.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 13; + v13 := compiler.MapValueForKey(m, "maxItems") + if v13 != nil { + t, ok := v13.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 14; + v14 := compiler.MapValueForKey(m, "minItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 15; + v15 := compiler.MapValueForKey(m, "uniqueItems") + if v15 != nil { + x.UniqueItems, ok = v15.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 16; + v16 := compiler.MapValueForKey(m, "enum") + if v16 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v16.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 17; + v17 := compiler.MapValueForKey(m, "multipleOf") + if v17 != nil { + switch v17 := v17.(type) { + case float64: + x.MultipleOf = v17 + case float32: + x.MultipleOf = float64(v17) + case uint64: + x.MultipleOf = float64(v17) + case uint32: + 
x.MultipleOf = float64(v17) + case int64: + x.MultipleOf = float64(v17) + case int32: + x.MultipleOf = float64(v17) + case int: + x.MultipleOf = float64(v17) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 18; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewProperties creates an object of type Properties if possible, returning an error if not. +func NewProperties(in interface{}, context *compiler.Context) (*Properties, error) { + errors := make([]error, 0) + x := &Properties{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSchema additional_properties = 1; + // MAP: Schema + x.AdditionalProperties = make([]*NamedSchema, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSchema{} + pair.Name = k + var err error + pair.Value, err = NewSchema(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewQueryParameterSubSchema creates an object of type QueryParameterSubSchema if possible, returning an error if not. 
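The repeated numeric switches above (for maximum, minimum, multipleOf, and so on) exist because yaml.v2 decodes an untyped scalar to whichever Go type fits, so the same field can arrive as an int or a float64 and must be normalized to the proto's float64 field. A short sketch of that decoding behaviour, using only gopkg.in/yaml.v2:

package main

import (
	"fmt"
	"log"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	for _, src := range []string{"maximum: 10", "maximum: 10.5"} {
		var m yaml.MapSlice
		if err := yaml.Unmarshal([]byte(src), &m); err != nil {
			log.Fatal(err)
		}
		// Print the concrete Go type the scalar decoded to.
		fmt.Printf("%s -> value of type %T\n", src, m[0].Value)
	}
	// Typical output:
	//   maximum: 10 -> value of type int
	//   maximum: 10.5 -> value of type float64
}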
+func NewQueryParameterSubSchema(in interface{}, context *compiler.Context) (*QueryParameterSubSchema, error) { + errors := make([]error, 0) + x := &QueryParameterSubSchema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"allowEmptyValue", "collectionFormat", "default", "description", "enum", "exclusiveMaximum", "exclusiveMinimum", "format", "in", "items", "maxItems", "maxLength", "maximum", "minItems", "minLength", "minimum", "multipleOf", "name", "pattern", "required", "type", "uniqueItems"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // bool required = 1; + v1 := compiler.MapValueForKey(m, "required") + if v1 != nil { + x.Required, ok = v1.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string in = 2; + v2 := compiler.MapValueForKey(m, "in") + if v2 != nil { + x.In, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [query] + if ok && !compiler.StringArrayContainsValue([]string{"query"}, x.In) { + message := fmt.Sprintf("has unexpected value for in: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 3; + v3 := compiler.MapValueForKey(m, "description") + if v3 != nil { + x.Description, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string name = 4; + v4 := compiler.MapValueForKey(m, "name") + if v4 != nil { + x.Name, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool allow_empty_value = 5; + v5 := compiler.MapValueForKey(m, "allowEmptyValue") + if v5 != nil { + x.AllowEmptyValue, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for allowEmptyValue: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string type = 6; + v6 := compiler.MapValueForKey(m, "type") + if v6 != nil { + x.Type, ok = v6.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [string number boolean integer array] + if ok && !compiler.StringArrayContainsValue([]string{"string", "number", "boolean", "integer", "array"}, x.Type) { + message := fmt.Sprintf("has unexpected value for type: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 7; + v7 := compiler.MapValueForKey(m, "format") + if v7 != nil { + x.Format, ok = v7.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + 
} + // PrimitivesItems items = 8; + v8 := compiler.MapValueForKey(m, "items") + if v8 != nil { + var err error + x.Items, err = NewPrimitivesItems(v8, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // string collection_format = 9; + v9 := compiler.MapValueForKey(m, "collectionFormat") + if v9 != nil { + x.CollectionFormat, ok = v9.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + // check for valid enum values + // [csv ssv tsv pipes multi] + if ok && !compiler.StringArrayContainsValue([]string{"csv", "ssv", "tsv", "pipes", "multi"}, x.CollectionFormat) { + message := fmt.Sprintf("has unexpected value for collectionFormat: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 10; + v10 := compiler.MapValueForKey(m, "default") + if v10 != nil { + var err error + x.Default, err = NewAny(v10, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float maximum = 11; + v11 := compiler.MapValueForKey(m, "maximum") + if v11 != nil { + switch v11 := v11.(type) { + case float64: + x.Maximum = v11 + case float32: + x.Maximum = float64(v11) + case uint64: + x.Maximum = float64(v11) + case uint32: + x.Maximum = float64(v11) + case int64: + x.Maximum = float64(v11) + case int32: + x.Maximum = float64(v11) + case int: + x.Maximum = float64(v11) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_maximum = 12; + v12 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v12 != nil { + x.ExclusiveMaximum, ok = v12.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 13; + v13 := compiler.MapValueForKey(m, "minimum") + if v13 != nil { + switch v13 := v13.(type) { + case float64: + x.Minimum = v13 + case float32: + x.Minimum = float64(v13) + case uint64: + x.Minimum = float64(v13) + case uint32: + x.Minimum = float64(v13) + case int64: + x.Minimum = float64(v13) + case int32: + x.Minimum = float64(v13) + case int: + x.Minimum = float64(v13) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 14; + v14 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v14 != nil { + x.ExclusiveMinimum, ok = v14.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 15; + v15 := compiler.MapValueForKey(m, "maxLength") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 16; + v16 := compiler.MapValueForKey(m, "minLength") + if v16 != nil { + t, ok := v16.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 17; 
+ v17 := compiler.MapValueForKey(m, "pattern") + if v17 != nil { + x.Pattern, ok = v17.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 18; + v18 := compiler.MapValueForKey(m, "maxItems") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 19; + v19 := compiler.MapValueForKey(m, "minItems") + if v19 != nil { + t, ok := v19.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 20; + v20 := compiler.MapValueForKey(m, "uniqueItems") + if v20 != nil { + x.UniqueItems, ok = v20.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v20, v20) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 21; + v21 := compiler.MapValueForKey(m, "enum") + if v21 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v21.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // float multiple_of = 22; + v22 := compiler.MapValueForKey(m, "multipleOf") + if v22 != nil { + switch v22 := v22.(type) { + case float64: + x.MultipleOf = v22 + case float32: + x.MultipleOf = float64(v22) + case uint64: + x.MultipleOf = float64(v22) + case uint32: + x.MultipleOf = float64(v22) + case int64: + x.MultipleOf = float64(v22) + case int32: + x.MultipleOf = float64(v22) + case int: + x.MultipleOf = float64(v22) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v22, v22) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 23; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponse creates an object of type Response if possible, returning an error if not. 
+func NewResponse(in interface{}, context *compiler.Context) (*Response, error) { + errors := make([]error, 0) + x := &Response{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"description"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "examples", "headers", "schema"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string description = 1; + v1 := compiler.MapValueForKey(m, "description") + if v1 != nil { + x.Description, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // SchemaItem schema = 2; + v2 := compiler.MapValueForKey(m, "schema") + if v2 != nil { + var err error + x.Schema, err = NewSchemaItem(v2, compiler.NewContext("schema", context)) + if err != nil { + errors = append(errors, err) + } + } + // Headers headers = 3; + v3 := compiler.MapValueForKey(m, "headers") + if v3 != nil { + var err error + x.Headers, err = NewHeaders(v3, compiler.NewContext("headers", context)) + if err != nil { + errors = append(errors, err) + } + } + // Examples examples = 4; + v4 := compiler.MapValueForKey(m, "examples") + if v4 != nil { + var err error + x.Examples, err = NewExamples(v4, compiler.NewContext("examples", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 5; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseDefinitions creates an object of type ResponseDefinitions if possible, returning an error if not. 
+func NewResponseDefinitions(in interface{}, context *compiler.Context) (*ResponseDefinitions, error) { + errors := make([]error, 0) + x := &ResponseDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedResponse additional_properties = 1; + // MAP: Response + x.AdditionalProperties = make([]*NamedResponse, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedResponse{} + pair.Name = k + var err error + pair.Value, err = NewResponse(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponseValue creates an object of type ResponseValue if possible, returning an error if not. +func NewResponseValue(in interface{}, context *compiler.Context) (*ResponseValue, error) { + errors := make([]error, 0) + x := &ResponseValue{} + matched := false + // Response response = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewResponse(m, compiler.NewContext("response", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_Response{Response: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // JsonReference json_reference = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewJsonReference(m, compiler.NewContext("jsonReference", context)) + if matchingError == nil { + x.Oneof = &ResponseValue_JsonReference{JsonReference: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewResponses creates an object of type Responses if possible, returning an error if not. 
+func NewResponses(in interface{}, context *compiler.Context) (*Responses, error) { + errors := make([]error, 0) + x := &Responses{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{} + allowedPatterns := []*regexp.Regexp{pattern2, pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // repeated NamedResponseValue response_code = 1; + // MAP: ResponseValue ^([0-9]{3})$|^(default)$ + x.ResponseCode = make([]*NamedResponseValue, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if pattern2.MatchString(k) { + pair := &NamedResponseValue{} + pair.Name = k + var err error + pair.Value, err = NewResponseValue(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.ResponseCode = append(x.ResponseCode, pair) + } + } + } + // repeated NamedAny vendor_extension = 2; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchema creates an object of type Schema if possible, returning an error if not. 
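NewResponses above accepts only keys that match the response-code pattern (^([0-9]{3})$|^(default)$) or the extension pattern (^x-); anything else is surfaced through the grouped error while the valid entries are still populated. A sketch of that validation, with assumed import paths:

package main

import (
	"fmt"
	"log"

	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
	"github.com/googleapis/gnostic/compiler"
	yaml "gopkg.in/yaml.v2"
)

func main() {
	src := `
"default":
  description: fallback response
"2000":
  description: not a real status code
`
	var m yaml.MapSlice
	if err := yaml.Unmarshal([]byte(src), &m); err != nil {
		log.Fatal(err)
	}
	responses, err := openapi_v2.NewResponses(m, compiler.NewContext("responses", nil))
	if err != nil {
		// "2000" matches neither the response-code pattern nor ^x-, so it is
		// reported as an invalid property; "default" still parses below.
		fmt.Println("validation error:", err)
	}
	for _, rc := range responses.ResponseCode {
		fmt.Println("response:", rc.Name) // response: default
	}
}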
+func NewSchema(in interface{}, context *compiler.Context) (*Schema, error) { + errors := make([]error, 0) + x := &Schema{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"$ref", "additionalProperties", "allOf", "default", "description", "discriminator", "enum", "example", "exclusiveMaximum", "exclusiveMinimum", "externalDocs", "format", "items", "maxItems", "maxLength", "maxProperties", "maximum", "minItems", "minLength", "minProperties", "minimum", "multipleOf", "pattern", "properties", "readOnly", "required", "title", "type", "uniqueItems", "xml"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string _ref = 1; + v1 := compiler.MapValueForKey(m, "$ref") + if v1 != nil { + x.XRef, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for $ref: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string format = 2; + v2 := compiler.MapValueForKey(m, "format") + if v2 != nil { + x.Format, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for format: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string title = 3; + v3 := compiler.MapValueForKey(m, "title") + if v3 != nil { + x.Title, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for title: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 4; + v4 := compiler.MapValueForKey(m, "description") + if v4 != nil { + x.Description, ok = v4.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Any default = 5; + v5 := compiler.MapValueForKey(m, "default") + if v5 != nil { + var err error + x.Default, err = NewAny(v5, compiler.NewContext("default", context)) + if err != nil { + errors = append(errors, err) + } + } + // float multiple_of = 6; + v6 := compiler.MapValueForKey(m, "multipleOf") + if v6 != nil { + switch v6 := v6.(type) { + case float64: + x.MultipleOf = v6 + case float32: + x.MultipleOf = float64(v6) + case uint64: + x.MultipleOf = float64(v6) + case uint32: + x.MultipleOf = float64(v6) + case int64: + x.MultipleOf = float64(v6) + case int32: + x.MultipleOf = float64(v6) + case int: + x.MultipleOf = float64(v6) + default: + message := fmt.Sprintf("has unexpected value for multipleOf: %+v (%T)", v6, v6) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float maximum = 7; + v7 := compiler.MapValueForKey(m, "maximum") + if v7 != nil { + switch v7 := v7.(type) { + case float64: + x.Maximum = v7 + case float32: + x.Maximum = float64(v7) + case uint64: + x.Maximum = float64(v7) + case uint32: + x.Maximum = float64(v7) + case int64: + x.Maximum = float64(v7) + case int32: + x.Maximum = float64(v7) + case int: + x.Maximum = float64(v7) + default: + message := fmt.Sprintf("has unexpected value for maximum: %+v (%T)", v7, v7) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool 
exclusive_maximum = 8; + v8 := compiler.MapValueForKey(m, "exclusiveMaximum") + if v8 != nil { + x.ExclusiveMaximum, ok = v8.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMaximum: %+v (%T)", v8, v8) + errors = append(errors, compiler.NewError(context, message)) + } + } + // float minimum = 9; + v9 := compiler.MapValueForKey(m, "minimum") + if v9 != nil { + switch v9 := v9.(type) { + case float64: + x.Minimum = v9 + case float32: + x.Minimum = float64(v9) + case uint64: + x.Minimum = float64(v9) + case uint32: + x.Minimum = float64(v9) + case int64: + x.Minimum = float64(v9) + case int32: + x.Minimum = float64(v9) + case int: + x.Minimum = float64(v9) + default: + message := fmt.Sprintf("has unexpected value for minimum: %+v (%T)", v9, v9) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool exclusive_minimum = 10; + v10 := compiler.MapValueForKey(m, "exclusiveMinimum") + if v10 != nil { + x.ExclusiveMinimum, ok = v10.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for exclusiveMinimum: %+v (%T)", v10, v10) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_length = 11; + v11 := compiler.MapValueForKey(m, "maxLength") + if v11 != nil { + t, ok := v11.(int) + if ok { + x.MaxLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxLength: %+v (%T)", v11, v11) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_length = 12; + v12 := compiler.MapValueForKey(m, "minLength") + if v12 != nil { + t, ok := v12.(int) + if ok { + x.MinLength = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minLength: %+v (%T)", v12, v12) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string pattern = 13; + v13 := compiler.MapValueForKey(m, "pattern") + if v13 != nil { + x.Pattern, ok = v13.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for pattern: %+v (%T)", v13, v13) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_items = 14; + v14 := compiler.MapValueForKey(m, "maxItems") + if v14 != nil { + t, ok := v14.(int) + if ok { + x.MaxItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxItems: %+v (%T)", v14, v14) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_items = 15; + v15 := compiler.MapValueForKey(m, "minItems") + if v15 != nil { + t, ok := v15.(int) + if ok { + x.MinItems = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for minItems: %+v (%T)", v15, v15) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool unique_items = 16; + v16 := compiler.MapValueForKey(m, "uniqueItems") + if v16 != nil { + x.UniqueItems, ok = v16.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for uniqueItems: %+v (%T)", v16, v16) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 max_properties = 17; + v17 := compiler.MapValueForKey(m, "maxProperties") + if v17 != nil { + t, ok := v17.(int) + if ok { + x.MaxProperties = int64(t) + } else { + message := fmt.Sprintf("has unexpected value for maxProperties: %+v (%T)", v17, v17) + errors = append(errors, compiler.NewError(context, message)) + } + } + // int64 min_properties = 18; + v18 := compiler.MapValueForKey(m, "minProperties") + if v18 != nil { + t, ok := v18.(int) + if ok { + x.MinProperties = int64(t) + } else { + message := fmt.Sprintf("has 
unexpected value for minProperties: %+v (%T)", v18, v18) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated string required = 19; + v19 := compiler.MapValueForKey(m, "required") + if v19 != nil { + v, ok := v19.([]interface{}) + if ok { + x.Required = compiler.ConvertInterfaceArrayToStringArray(v) + } else { + message := fmt.Sprintf("has unexpected value for required: %+v (%T)", v19, v19) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated Any enum = 20; + v20 := compiler.MapValueForKey(m, "enum") + if v20 != nil { + // repeated Any + x.Enum = make([]*Any, 0) + a, ok := v20.([]interface{}) + if ok { + for _, item := range a { + y, err := NewAny(item, compiler.NewContext("enum", context)) + if err != nil { + errors = append(errors, err) + } + x.Enum = append(x.Enum, y) + } + } + } + // AdditionalPropertiesItem additional_properties = 21; + v21 := compiler.MapValueForKey(m, "additionalProperties") + if v21 != nil { + var err error + x.AdditionalProperties, err = NewAdditionalPropertiesItem(v21, compiler.NewContext("additionalProperties", context)) + if err != nil { + errors = append(errors, err) + } + } + // TypeItem type = 22; + v22 := compiler.MapValueForKey(m, "type") + if v22 != nil { + var err error + x.Type, err = NewTypeItem(v22, compiler.NewContext("type", context)) + if err != nil { + errors = append(errors, err) + } + } + // ItemsItem items = 23; + v23 := compiler.MapValueForKey(m, "items") + if v23 != nil { + var err error + x.Items, err = NewItemsItem(v23, compiler.NewContext("items", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated Schema all_of = 24; + v24 := compiler.MapValueForKey(m, "allOf") + if v24 != nil { + // repeated Schema + x.AllOf = make([]*Schema, 0) + a, ok := v24.([]interface{}) + if ok { + for _, item := range a { + y, err := NewSchema(item, compiler.NewContext("allOf", context)) + if err != nil { + errors = append(errors, err) + } + x.AllOf = append(x.AllOf, y) + } + } + } + // Properties properties = 25; + v25 := compiler.MapValueForKey(m, "properties") + if v25 != nil { + var err error + x.Properties, err = NewProperties(v25, compiler.NewContext("properties", context)) + if err != nil { + errors = append(errors, err) + } + } + // string discriminator = 26; + v26 := compiler.MapValueForKey(m, "discriminator") + if v26 != nil { + x.Discriminator, ok = v26.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for discriminator: %+v (%T)", v26, v26) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool read_only = 27; + v27 := compiler.MapValueForKey(m, "readOnly") + if v27 != nil { + x.ReadOnly, ok = v27.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for readOnly: %+v (%T)", v27, v27) + errors = append(errors, compiler.NewError(context, message)) + } + } + // Xml xml = 28; + v28 := compiler.MapValueForKey(m, "xml") + if v28 != nil { + var err error + x.Xml, err = NewXml(v28, compiler.NewContext("xml", context)) + if err != nil { + errors = append(errors, err) + } + } + // ExternalDocs external_docs = 29; + v29 := compiler.MapValueForKey(m, "externalDocs") + if v29 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v29, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // Any example = 30; + v30 := compiler.MapValueForKey(m, "example") + if v30 != nil { + var err error + x.Example, err = NewAny(v30, compiler.NewContext("example", context)) + 
if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 31; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSchemaItem creates an object of type SchemaItem if possible, returning an error if not. +func NewSchemaItem(in interface{}, context *compiler.Context) (*SchemaItem, error) { + errors := make([]error, 0) + x := &SchemaItem{} + matched := false + // Schema schema = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewSchema(m, compiler.NewContext("schema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_Schema{Schema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // FileSchema file_schema = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewFileSchema(m, compiler.NewContext("fileSchema", context)) + if matchingError == nil { + x.Oneof = &SchemaItem_FileSchema{FileSchema: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitions creates an object of type SecurityDefinitions if possible, returning an error if not. +func NewSecurityDefinitions(in interface{}, context *compiler.Context) (*SecurityDefinitions, error) { + errors := make([]error, 0) + x := &SecurityDefinitions{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedSecurityDefinitionsItem additional_properties = 1; + // MAP: SecurityDefinitionsItem + x.AdditionalProperties = make([]*NamedSecurityDefinitionsItem, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedSecurityDefinitionsItem{} + pair.Name = k + var err error + pair.Value, err = NewSecurityDefinitionsItem(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityDefinitionsItem creates an object of type SecurityDefinitionsItem if possible, returning an error if not. 
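The generated constructors above all follow one pattern: unpack a YAML map with compiler.UnpackMap, reject unknown keys, coerce each recognised key into the corresponding field, and return an aggregated error group. A minimal caller-side sketch, assuming the gopkg.in/yaml.v2 types this file already uses and a nil parent context (parseSchemaYAML is an illustrative helper, not part of the vendored file):

	// parseSchemaYAML is a hypothetical helper showing how NewSchema is driven:
	// it unmarshals raw YAML into an ordered yaml.MapSlice and lets NewSchema
	// perform the key validation and field coercion seen above.
	func parseSchemaYAML(data []byte) (*Schema, error) {
		var node yaml.MapSlice // ordered map type from gopkg.in/yaml.v2
		if err := yaml.Unmarshal(data, &node); err != nil {
			return nil, err
		}
		// NewSchema returns either the populated *Schema or an error group
		// describing every invalid or unexpected key it encountered.
		return NewSchema(node, compiler.NewContext("schema", nil))
	}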
+func NewSecurityDefinitionsItem(in interface{}, context *compiler.Context) (*SecurityDefinitionsItem, error) { + errors := make([]error, 0) + x := &SecurityDefinitionsItem{} + matched := false + // BasicAuthenticationSecurity basic_authentication_security = 1; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewBasicAuthenticationSecurity(m, compiler.NewContext("basicAuthenticationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{BasicAuthenticationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // ApiKeySecurity api_key_security = 2; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewApiKeySecurity(m, compiler.NewContext("apiKeySecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{ApiKeySecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ImplicitSecurity oauth2_implicit_security = 3; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ImplicitSecurity(m, compiler.NewContext("oauth2ImplicitSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{Oauth2ImplicitSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2PasswordSecurity oauth2_password_security = 4; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2PasswordSecurity(m, compiler.NewContext("oauth2PasswordSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{Oauth2PasswordSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2ApplicationSecurity oauth2_application_security = 5; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2ApplicationSecurity(m, compiler.NewContext("oauth2ApplicationSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{Oauth2ApplicationSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + // Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + { + m, ok := compiler.UnpackMap(in) + if ok { + // errors might be ok here, they mean we just don't have the right subtype + t, matchingError := NewOauth2AccessCodeSecurity(m, compiler.NewContext("oauth2AccessCodeSecurity", context)) + if matchingError == nil { + x.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{Oauth2AccessCodeSecurity: t} + matched = true + } else { + errors = append(errors, matchingError) + } + } + } + if matched { + // since the oneof matched one of its possibilities, discard any matching errors + errors = make([]error, 0) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewSecurityRequirement creates an object of type SecurityRequirement if possible, returning an error if not. 
+func NewSecurityRequirement(in interface{}, context *compiler.Context) (*SecurityRequirement, error) { + errors := make([]error, 0) + x := &SecurityRequirement{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedStringArray additional_properties = 1; + // MAP: StringArray + x.AdditionalProperties = make([]*NamedStringArray, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedStringArray{} + pair.Name = k + var err error + pair.Value, err = NewStringArray(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewStringArray creates an object of type StringArray if possible, returning an error if not. +func NewStringArray(in interface{}, context *compiler.Context) (*StringArray, error) { + errors := make([]error, 0) + x := &StringArray{} + a, ok := in.([]interface{}) + if !ok { + message := fmt.Sprintf("has unexpected value for StringArray: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + x.Value = make([]string, 0) + for _, s := range a { + x.Value = append(x.Value, s.(string)) + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTag creates an object of type Tag if possible, returning an error if not. +func NewTag(in interface{}, context *compiler.Context) (*Tag, error) { + errors := make([]error, 0) + x := &Tag{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + requiredKeys := []string{"name"} + missingKeys := compiler.MissingKeysInMap(m, requiredKeys) + if len(missingKeys) > 0 { + message := fmt.Sprintf("is missing required %s: %+v", compiler.PluralProperties(len(missingKeys)), strings.Join(missingKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + allowedKeys := []string{"description", "externalDocs", "name"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string description = 2; + v2 := compiler.MapValueForKey(m, "description") + if v2 != nil { + x.Description, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for description: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // ExternalDocs external_docs = 3; + v3 := compiler.MapValueForKey(m, "externalDocs") + if v3 != nil { + var err error + x.ExternalDocs, err = NewExternalDocs(v3, compiler.NewContext("externalDocs", context)) + if err != nil { + errors = append(errors, err) + } + } + // repeated NamedAny vendor_extension = 4; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := 
range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewTypeItem creates an object of type TypeItem if possible, returning an error if not. +func NewTypeItem(in interface{}, context *compiler.Context) (*TypeItem, error) { + errors := make([]error, 0) + x := &TypeItem{} + switch in := in.(type) { + case string: + x.Value = make([]string, 0) + x.Value = append(x.Value, in) + case []interface{}: + x.Value = make([]string, 0) + for _, v := range in { + value, ok := v.(string) + if ok { + x.Value = append(x.Value, value) + } else { + message := fmt.Sprintf("has unexpected value for string array element: %+v (%T)", value, value) + errors = append(errors, compiler.NewError(context, message)) + } + } + default: + message := fmt.Sprintf("has unexpected value for string array: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewVendorExtension creates an object of type VendorExtension if possible, returning an error if not. +func NewVendorExtension(in interface{}, context *compiler.Context) (*VendorExtension, error) { + errors := make([]error, 0) + x := &VendorExtension{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + // repeated NamedAny additional_properties = 1; + // MAP: Any + x.AdditionalProperties = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.AdditionalProperties = append(x.AdditionalProperties, pair) + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// NewXml creates an object of type Xml if possible, returning an error if not. 
+func NewXml(in interface{}, context *compiler.Context) (*Xml, error) { + errors := make([]error, 0) + x := &Xml{} + m, ok := compiler.UnpackMap(in) + if !ok { + message := fmt.Sprintf("has unexpected value: %+v (%T)", in, in) + errors = append(errors, compiler.NewError(context, message)) + } else { + allowedKeys := []string{"attribute", "name", "namespace", "prefix", "wrapped"} + allowedPatterns := []*regexp.Regexp{pattern0} + invalidKeys := compiler.InvalidKeysInMap(m, allowedKeys, allowedPatterns) + if len(invalidKeys) > 0 { + message := fmt.Sprintf("has invalid %s: %+v", compiler.PluralProperties(len(invalidKeys)), strings.Join(invalidKeys, ", ")) + errors = append(errors, compiler.NewError(context, message)) + } + // string name = 1; + v1 := compiler.MapValueForKey(m, "name") + if v1 != nil { + x.Name, ok = v1.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for name: %+v (%T)", v1, v1) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string namespace = 2; + v2 := compiler.MapValueForKey(m, "namespace") + if v2 != nil { + x.Namespace, ok = v2.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for namespace: %+v (%T)", v2, v2) + errors = append(errors, compiler.NewError(context, message)) + } + } + // string prefix = 3; + v3 := compiler.MapValueForKey(m, "prefix") + if v3 != nil { + x.Prefix, ok = v3.(string) + if !ok { + message := fmt.Sprintf("has unexpected value for prefix: %+v (%T)", v3, v3) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool attribute = 4; + v4 := compiler.MapValueForKey(m, "attribute") + if v4 != nil { + x.Attribute, ok = v4.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for attribute: %+v (%T)", v4, v4) + errors = append(errors, compiler.NewError(context, message)) + } + } + // bool wrapped = 5; + v5 := compiler.MapValueForKey(m, "wrapped") + if v5 != nil { + x.Wrapped, ok = v5.(bool) + if !ok { + message := fmt.Sprintf("has unexpected value for wrapped: %+v (%T)", v5, v5) + errors = append(errors, compiler.NewError(context, message)) + } + } + // repeated NamedAny vendor_extension = 6; + // MAP: Any ^x- + x.VendorExtension = make([]*NamedAny, 0) + for _, item := range m { + k, ok := compiler.StringValue(item.Key) + if ok { + v := item.Value + if strings.HasPrefix(k, "x-") { + pair := &NamedAny{} + pair.Name = k + result := &Any{} + handled, resultFromExt, err := compiler.HandleExtension(context, v, k) + if handled { + if err != nil { + errors = append(errors, err) + } else { + bytes, _ := yaml.Marshal(v) + result.Yaml = string(bytes) + result.Value = resultFromExt + pair.Value = result + } + } else { + pair.Value, err = NewAny(v, compiler.NewContext(k, context)) + if err != nil { + errors = append(errors, err) + } + } + x.VendorExtension = append(x.VendorExtension, pair) + } + } + } + } + return x, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside AdditionalPropertiesItem objects. +func (m *AdditionalPropertiesItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*AdditionalPropertiesItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Any objects. 
+func (m *Any) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ApiKeySecurity objects. +func (m *ApiKeySecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BasicAuthenticationSecurity objects. +func (m *BasicAuthenticationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside BodyParameter objects. +func (m *BodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Contact objects. +func (m *Contact) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Default objects. +func (m *Default) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Definitions objects. +func (m *Definitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Document objects. 
+func (m *Document) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Info != nil { + _, err := m.Info.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Paths != nil { + _, err := m.Paths.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Definitions != nil { + _, err := m.Definitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Parameters != nil { + _, err := m.Parameters.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.SecurityDefinitions != nil { + _, err := m.SecurityDefinitions.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Tags { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Examples objects. +func (m *Examples) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ExternalDocs objects. +func (m *ExternalDocs) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FileSchema objects. +func (m *FileSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside FormDataParameterSubSchema objects. 
+func (m *FormDataParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Header objects. +func (m *Header) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside HeaderParameterSubSchema objects. +func (m *HeaderParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Headers objects. +func (m *Headers) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Info objects. +func (m *Info) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Contact != nil { + _, err := m.Contact.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.License != nil { + _, err := m.License.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ItemsItem objects. 
+func (m *ItemsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.Schema { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside JsonReference objects. +func (m *JsonReference) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewJsonReference(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside License objects. +func (m *License) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedAny objects. +func (m *NamedAny) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedHeader objects. +func (m *NamedHeader) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedParameter objects. +func (m *NamedParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedPathItem objects. +func (m *NamedPathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponse objects. +func (m *NamedResponse) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedResponseValue objects. +func (m *NamedResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSchema objects. 
+func (m *NamedSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedSecurityDefinitionsItem objects. +func (m *NamedSecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedString objects. +func (m *NamedString) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NamedStringArray objects. +func (m *NamedStringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Value != nil { + _, err := m.Value.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside NonBodyParameter objects. +func (m *NonBodyParameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*NonBodyParameter_HeaderParameterSubSchema) + if ok { + _, err := p.HeaderParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_FormDataParameterSubSchema) + if ok { + _, err := p.FormDataParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_QueryParameterSubSchema) + if ok { + _, err := p.QueryParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*NonBodyParameter_PathParameterSubSchema) + if ok { + _, err := p.PathParameterSubSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2AccessCodeSecurity objects. +func (m *Oauth2AccessCodeSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ApplicationSecurity objects. +func (m *Oauth2ApplicationSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2ImplicitSecurity objects. 
+func (m *Oauth2ImplicitSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2PasswordSecurity objects. +func (m *Oauth2PasswordSecurity) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Scopes != nil { + _, err := m.Scopes.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Oauth2Scopes objects. +func (m *Oauth2Scopes) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Operation objects. +func (m *Operation) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Responses != nil { + _, err := m.Responses.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Security { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Parameter objects. +func (m *Parameter) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*Parameter_BodyParameter) + if ok { + _, err := p.BodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*Parameter_NonBodyParameter) + if ok { + _, err := p.NonBodyParameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParameterDefinitions objects. +func (m *ParameterDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ParametersItem objects. 
+func (m *ParametersItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ParametersItem_Parameter) + if ok { + _, err := p.Parameter.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ParametersItem_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewParametersItem(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathItem objects. +func (m *PathItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewPathItem(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Get != nil { + _, err := m.Get.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Put != nil { + _, err := m.Put.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Post != nil { + _, err := m.Post.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Delete != nil { + _, err := m.Delete.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Options != nil { + _, err := m.Options.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Head != nil { + _, err := m.Head.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Patch != nil { + _, err := m.Patch.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Parameters { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PathParameterSubSchema objects. +func (m *PathParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Paths objects. 
+func (m *Paths) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.Path { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside PrimitivesItems objects. +func (m *PrimitivesItems) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Properties objects. +func (m *Properties) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside QueryParameterSubSchema objects. +func (m *QueryParameterSubSchema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Response objects. +func (m *Response) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.Schema != nil { + _, err := m.Schema.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Headers != nil { + _, err := m.Headers.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Examples != nil { + _, err := m.Examples.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseDefinitions objects. 
+func (m *ResponseDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside ResponseValue objects. +func (m *ResponseValue) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*ResponseValue_Response) + if ok { + _, err := p.Response.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*ResponseValue_JsonReference) + if ok { + info, err := p.JsonReference.ResolveReferences(root) + if err != nil { + return nil, err + } else if info != nil { + n, err := NewResponseValue(info, nil) + if err != nil { + return nil, err + } else if n != nil { + *m = *n + return nil, nil + } + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Responses objects. +func (m *Responses) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.ResponseCode { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Schema objects. +func (m *Schema) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.XRef != "" { + info, err := compiler.ReadInfoForRef(root, m.XRef) + if err != nil { + return nil, err + } + if info != nil { + replacement, err := NewSchema(info, nil) + if err == nil { + *m = *replacement + return m.ResolveReferences(root) + } + } + return info, nil + } + if m.Default != nil { + _, err := m.Default.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.Enum { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.AdditionalProperties != nil { + _, err := m.AdditionalProperties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Type != nil { + _, err := m.Type.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Items != nil { + _, err := m.Items.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.AllOf { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + if m.Properties != nil { + _, err := m.Properties.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Xml != nil { + _, err := m.Xml.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + if m.Example != nil { + _, err := m.Example.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = 
append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SchemaItem objects. +func (m *SchemaItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SchemaItem_Schema) + if ok { + _, err := p.Schema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SchemaItem_FileSchema) + if ok { + _, err := p.FileSchema.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitions objects. +func (m *SecurityDefinitions) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityDefinitionsItem objects. +func (m *SecurityDefinitionsItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_BasicAuthenticationSecurity) + if ok { + _, err := p.BasicAuthenticationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_ApiKeySecurity) + if ok { + _, err := p.ApiKeySecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ImplicitSecurity) + if ok { + _, err := p.Oauth2ImplicitSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2PasswordSecurity) + if ok { + _, err := p.Oauth2PasswordSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2ApplicationSecurity) + if ok { + _, err := p.Oauth2ApplicationSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + { + p, ok := m.Oneof.(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) + if ok { + _, err := p.Oauth2AccessCodeSecurity.ResolveReferences(root) + if err != nil { + return nil, err + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside SecurityRequirement objects. +func (m *SecurityRequirement) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside StringArray objects. +func (m *StringArray) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Tag objects. 
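The ResolveReferences methods generated above perform a depth-first walk; the nodes that can carry a $ref (JsonReference, PathItem, Schema) re-read the referenced content via compiler.ReadInfoForRef, replace themselves with the parsed result, and resolve again before returning. A hedged sketch of how a caller might trigger that pass over a freshly built Document (resolveDocument is an illustrative helper, not part of this file; root is the path or URL that $ref values are resolved against):

	// resolveDocument runs one resolution pass over the parsed tree.
	// Replaced nodes re-invoke ResolveReferences on themselves (see the
	// Schema and PathItem methods above), so a single call suffices.
	func resolveDocument(doc *Document, root string) error {
		_, err := doc.ResolveReferences(root)
		return err
	}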
+func (m *Tag) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + if m.ExternalDocs != nil { + _, err := m.ExternalDocs.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside TypeItem objects. +func (m *TypeItem) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside VendorExtension objects. +func (m *VendorExtension) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.AdditionalProperties { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ResolveReferences resolves references found inside Xml objects. +func (m *Xml) ResolveReferences(root string) (interface{}, error) { + errors := make([]error, 0) + for _, item := range m.VendorExtension { + if item != nil { + _, err := item.ResolveReferences(root) + if err != nil { + errors = append(errors, err) + } + } + } + return nil, compiler.NewErrorGroupOrNil(errors) +} + +// ToRawInfo returns a description of AdditionalPropertiesItem suitable for JSON or YAML export. +func (m *AdditionalPropertiesItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // AdditionalPropertiesItem + // {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:boolean Type:bool StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if v1, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return v1.Boolean + } + return nil +} + +// ToRawInfo returns a description of Any suitable for JSON or YAML export. +func (m *Any) ToRawInfo() interface{} { + var err error + var info1 []yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info1) + if err == nil { + return info1 + } + var info2 yaml.MapSlice + err = yaml.Unmarshal([]byte(m.Yaml), &info2) + if err == nil { + return info2 + } + var info3 interface{} + err = yaml.Unmarshal([]byte(m.Yaml), &info3) + if err == nil { + return info3 + } + return nil +} + +// ToRawInfo returns a description of ApiKeySecurity suitable for JSON or YAML export. +func (m *ApiKeySecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BasicAuthenticationSecurity suitable for JSON or YAML export. +func (m *BasicAuthenticationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of BodyParameter suitable for JSON or YAML export. +func (m *BodyParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Contact suitable for JSON or YAML export. +func (m *Contact) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } + if m.Email != "" { + info = append(info, yaml.MapItem{Key: "email", Value: m.Email}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Default suitable for JSON or YAML export. 
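ToRawInfo is the inverse of the constructors: each node re-emits its fields as an ordered yaml.MapSlice, skipping unset optional values and always emitting required ones, so marshaling the result reproduces a YAML view of the parsed structure. A small illustrative helper, again not part of the generated file:

	// contactToYAML round-trips a parsed Contact back to YAML. ToRawInfo
	// returns a yaml.MapSlice, which gopkg.in/yaml.v2 marshals directly
	// while preserving key order.
	func contactToYAML(c *Contact) ([]byte, error) {
		return yaml.Marshal(c.ToRawInfo())
	}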
+func (m *Default) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of Definitions suitable for JSON or YAML export. +func (m *Definitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Document suitable for JSON or YAML export. +func (m *Document) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "swagger", Value: m.Swagger}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "info", Value: m.Info.ToRawInfo()}) + // &{Name:info Type:Info StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Host != "" { + info = append(info, yaml.MapItem{Key: "host", Value: m.Host}) + } + if m.BasePath != "" { + info = append(info, yaml.MapItem{Key: "basePath", Value: m.BasePath}) + } + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "paths", Value: m.Paths.ToRawInfo()}) + // &{Name:paths Type:Paths StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Definitions != nil { + info = append(info, yaml.MapItem{Key: "definitions", Value: m.Definitions.ToRawInfo()}) + } + // &{Name:definitions Type:Definitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Parameters != nil { + info = append(info, yaml.MapItem{Key: "parameters", Value: m.Parameters.ToRawInfo()}) + } + // &{Name:parameters Type:ParameterDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Responses != nil { + info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + } + // &{Name:responses Type:ResponseDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "security", Value: items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.SecurityDefinitions != nil { + info = append(info, yaml.MapItem{Key: "securityDefinitions", Value: m.SecurityDefinitions.ToRawInfo()}) + } + // &{Name:securityDefinitions Type:SecurityDefinitions StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Tags) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Tags { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "tags", Value: items}) + } + // &{Name:tags Type:Tag StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Examples suitable for JSON or YAML export. +func (m *Examples) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ExternalDocs suitable for JSON or YAML export. +func (m *ExternalDocs) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FileSchema suitable for JSON or YAML export. +func (m *FileSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Title != "" { + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Required) != 0 { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.ReadOnly != false { + info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Example != nil { + info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()}) + } + // &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of FormDataParameterSubSchema suitable for JSON or YAML export. 
+func (m *FormDataParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Header suitable for JSON or YAML export. +func (m *Header) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of HeaderParameterSubSchema suitable for JSON or YAML export. 
+func (m *HeaderParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Headers suitable for JSON or YAML export. 
+func (m *Headers) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedHeader StringEnumValues:[] MapType:Header Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Info suitable for JSON or YAML export. +func (m *Info) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "title", Value: m.Title}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "version", Value: m.Version}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.TermsOfService != "" { + info = append(info, yaml.MapItem{Key: "termsOfService", Value: m.TermsOfService}) + } + if m.Contact != nil { + info = append(info, yaml.MapItem{Key: "contact", Value: m.Contact.ToRawInfo()}) + } + // &{Name:contact Type:Contact StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.License != nil { + info = append(info, yaml.MapItem{Key: "license", Value: m.License.ToRawInfo()}) + } + // &{Name:license Type:License StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ItemsItem suitable for JSON or YAML export. +func (m *ItemsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Schema) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Schema { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "schema", Value: items}) + } + // &{Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + return info +} + +// ToRawInfo returns a description of JsonReference suitable for JSON or YAML export. +func (m *JsonReference) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + return info +} + +// ToRawInfo returns a description of License suitable for JSON or YAML export. +func (m *License) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m.Url != "" { + info = append(info, yaml.MapItem{Key: "url", Value: m.Url}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of NamedAny suitable for JSON or YAML export. +func (m *NamedAny) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedHeader suitable for JSON or YAML export. +func (m *NamedHeader) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Header StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedParameter suitable for JSON or YAML export. +func (m *NamedParameter) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedPathItem suitable for JSON or YAML export. +func (m *NamedPathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:PathItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponse suitable for JSON or YAML export. +func (m *NamedResponse) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedResponseValue suitable for JSON or YAML export. +func (m *NamedResponseValue) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:ResponseValue StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSchema suitable for JSON or YAML export. 
+func (m *NamedSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedSecurityDefinitionsItem suitable for JSON or YAML export. +func (m *NamedSecurityDefinitionsItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:SecurityDefinitionsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NamedString suitable for JSON or YAML export. +func (m *NamedString) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Value != "" { + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + } + return info +} + +// ToRawInfo returns a description of NamedStringArray suitable for JSON or YAML export. +func (m *NamedStringArray) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + // &{Name:value Type:StringArray StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:Mapped value} + return info +} + +// ToRawInfo returns a description of NonBodyParameter suitable for JSON or YAML export. +func (m *NonBodyParameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // NonBodyParameter + // {Name:headerParameterSubSchema Type:HeaderParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetHeaderParameterSubSchema() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:formDataParameterSubSchema Type:FormDataParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetFormDataParameterSubSchema() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:queryParameterSubSchema Type:QueryParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetQueryParameterSubSchema() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:pathParameterSubSchema Type:PathParameterSubSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetPathParameterSubSchema() + if v3 != nil { + return v3.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Oauth2AccessCodeSecurity suitable for JSON or YAML export. +func (m *Oauth2AccessCodeSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ApplicationSecurity suitable for JSON or YAML export. +func (m *Oauth2ApplicationSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2ImplicitSecurity suitable for JSON or YAML export. +func (m *Oauth2ImplicitSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. + info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "authorizationUrl", Value: m.AuthorizationUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2PasswordSecurity suitable for JSON or YAML export. +func (m *Oauth2PasswordSecurity) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "flow", Value: m.Flow}) + if m.Scopes != nil { + info = append(info, yaml.MapItem{Key: "scopes", Value: m.Scopes.ToRawInfo()}) + } + // &{Name:scopes Type:Oauth2Scopes StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + // always include this required field. + info = append(info, yaml.MapItem{Key: "tokenUrl", Value: m.TokenUrl}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Oauth2Scopes suitable for JSON or YAML export. +func (m *Oauth2Scopes) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // &{Name:additionalProperties Type:NamedString StringEnumValues:[] MapType:string Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Operation suitable for JSON or YAML export. +func (m *Operation) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Tags) != 0 { + info = append(info, yaml.MapItem{Key: "tags", Value: m.Tags}) + } + if m.Summary != "" { + info = append(info, yaml.MapItem{Key: "summary", Value: m.Summary}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.OperationId != "" { + info = append(info, yaml.MapItem{Key: "operationId", Value: m.OperationId}) + } + if len(m.Produces) != 0 { + info = append(info, yaml.MapItem{Key: "produces", Value: m.Produces}) + } + if len(m.Consumes) != 0 { + info = append(info, yaml.MapItem{Key: "consumes", Value: m.Consumes}) + } + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "responses", Value: m.Responses.ToRawInfo()}) + // &{Name:responses Type:Responses StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Schemes) != 0 { + info = append(info, yaml.MapItem{Key: "schemes", Value: m.Schemes}) + } + if m.Deprecated != false { + info = append(info, yaml.MapItem{Key: "deprecated", Value: m.Deprecated}) + } + if len(m.Security) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Security { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "security", Value: items}) + } + // &{Name:security Type:SecurityRequirement StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Parameter suitable for JSON or YAML export. +func (m *Parameter) ToRawInfo() interface{} { + // ONE OF WRAPPER + // Parameter + // {Name:bodyParameter Type:BodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBodyParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:nonBodyParameter Type:NonBodyParameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetNonBodyParameter() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of ParameterDefinitions suitable for JSON or YAML export. +func (m *ParameterDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedParameter StringEnumValues:[] MapType:Parameter Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ParametersItem suitable for JSON or YAML export. +func (m *ParametersItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ParametersItem + // {Name:parameter Type:Parameter StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetParameter() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of PathItem suitable for JSON or YAML export. 
+func (m *PathItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.XRef != "" { + info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef}) + } + if m.Get != nil { + info = append(info, yaml.MapItem{Key: "get", Value: m.Get.ToRawInfo()}) + } + // &{Name:get Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Put != nil { + info = append(info, yaml.MapItem{Key: "put", Value: m.Put.ToRawInfo()}) + } + // &{Name:put Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Post != nil { + info = append(info, yaml.MapItem{Key: "post", Value: m.Post.ToRawInfo()}) + } + // &{Name:post Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Delete != nil { + info = append(info, yaml.MapItem{Key: "delete", Value: m.Delete.ToRawInfo()}) + } + // &{Name:delete Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Options != nil { + info = append(info, yaml.MapItem{Key: "options", Value: m.Options.ToRawInfo()}) + } + // &{Name:options Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Head != nil { + info = append(info, yaml.MapItem{Key: "head", Value: m.Head.ToRawInfo()}) + } + // &{Name:head Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Patch != nil { + info = append(info, yaml.MapItem{Key: "patch", Value: m.Patch.ToRawInfo()}) + } + // &{Name:patch Type:Operation StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if len(m.Parameters) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Parameters { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "parameters", Value: items}) + } + // &{Name:parameters Type:ParametersItem StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:The parameters needed to send a valid API call.} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PathParameterSubSchema suitable for JSON or YAML export. +func (m *PathParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. 
+ info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Paths suitable for JSON or YAML export. 
+func (m *Paths) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + if m.Path != nil { + for _, item := range m.Path { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:Path Type:NamedPathItem StringEnumValues:[] MapType:PathItem Repeated:true Pattern:^/ Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of PrimitivesItems suitable for JSON or YAML export. +func (m *PrimitivesItems) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Properties suitable for JSON or YAML export. 
+func (m *Properties) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedSchema StringEnumValues:[] MapType:Schema Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of QueryParameterSubSchema suitable for JSON or YAML export. +func (m *QueryParameterSubSchema) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Required != false { + info = append(info, yaml.MapItem{Key: "required", Value: m.Required}) + } + if m.In != "" { + info = append(info, yaml.MapItem{Key: "in", Value: m.In}) + } + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.AllowEmptyValue != false { + info = append(info, yaml.MapItem{Key: "allowEmptyValue", Value: m.AllowEmptyValue}) + } + if m.Type != "" { + info = append(info, yaml.MapItem{Key: "type", Value: m.Type}) + } + if m.Format != "" { + info = append(info, yaml.MapItem{Key: "format", Value: m.Format}) + } + if m.Items != nil { + info = append(info, yaml.MapItem{Key: "items", Value: m.Items.ToRawInfo()}) + } + // &{Name:items Type:PrimitivesItems StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.CollectionFormat != "" { + info = append(info, yaml.MapItem{Key: "collectionFormat", Value: m.CollectionFormat}) + } + if m.Default != nil { + info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()}) + } + // &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Maximum != 0.0 { + info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum}) + } + if m.ExclusiveMaximum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum}) + } + if m.Minimum != 0.0 { + info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum}) + } + if m.ExclusiveMinimum != false { + info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum}) + } + if m.MaxLength != 0 { + info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength}) + } + if m.MinLength != 0 { + info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength}) + } + if m.Pattern != "" { + info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern}) + } + if m.MaxItems != 0 { + info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems}) + } + if m.MinItems != 0 { + info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems}) + } + if m.UniqueItems != false { + info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems}) + } + if len(m.Enum) != 0 { + items := make([]interface{}, 0) + for _, item := range m.Enum { + items = append(items, item.ToRawInfo()) + } + info = append(info, yaml.MapItem{Key: "enum", Value: items}) + } + // &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:} + if m.MultipleOf != 0.0 { + info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } 
+ } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Response suitable for JSON or YAML export. +func (m *Response) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + if m.Schema != nil { + info = append(info, yaml.MapItem{Key: "schema", Value: m.Schema.ToRawInfo()}) + } + // &{Name:schema Type:SchemaItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Headers != nil { + info = append(info, yaml.MapItem{Key: "headers", Value: m.Headers.ToRawInfo()}) + } + // &{Name:headers Type:Headers StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.Examples != nil { + info = append(info, yaml.MapItem{Key: "examples", Value: m.Examples.ToRawInfo()}) + } + // &{Name:examples Type:Examples StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseDefinitions suitable for JSON or YAML export. +func (m *ResponseDefinitions) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedResponse StringEnumValues:[] MapType:Response Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of ResponseValue suitable for JSON or YAML export. +func (m *ResponseValue) ToRawInfo() interface{} { + // ONE OF WRAPPER + // ResponseValue + // {Name:response Type:Response StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetResponse() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:jsonReference Type:JsonReference StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetJsonReference() + if v1 != nil { + return v1.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of Responses suitable for JSON or YAML export. +func (m *Responses) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.ResponseCode != nil { + for _, item := range m.ResponseCode { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:ResponseCode Type:NamedResponseValue StringEnumValues:[] MapType:ResponseValue Repeated:true Pattern:^([0-9]{3})$|^(default)$ Implicit:true Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Schema suitable for JSON or YAML export. 
+func (m *Schema) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.XRef != "" {
+		info = append(info, yaml.MapItem{Key: "$ref", Value: m.XRef})
+	}
+	if m.Format != "" {
+		info = append(info, yaml.MapItem{Key: "format", Value: m.Format})
+	}
+	if m.Title != "" {
+		info = append(info, yaml.MapItem{Key: "title", Value: m.Title})
+	}
+	if m.Description != "" {
+		info = append(info, yaml.MapItem{Key: "description", Value: m.Description})
+	}
+	if m.Default != nil {
+		info = append(info, yaml.MapItem{Key: "default", Value: m.Default.ToRawInfo()})
+	}
+	// &{Name:default Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.MultipleOf != 0.0 {
+		info = append(info, yaml.MapItem{Key: "multipleOf", Value: m.MultipleOf})
+	}
+	if m.Maximum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "maximum", Value: m.Maximum})
+	}
+	if m.ExclusiveMaximum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMaximum", Value: m.ExclusiveMaximum})
+	}
+	if m.Minimum != 0.0 {
+		info = append(info, yaml.MapItem{Key: "minimum", Value: m.Minimum})
+	}
+	if m.ExclusiveMinimum != false {
+		info = append(info, yaml.MapItem{Key: "exclusiveMinimum", Value: m.ExclusiveMinimum})
+	}
+	if m.MaxLength != 0 {
+		info = append(info, yaml.MapItem{Key: "maxLength", Value: m.MaxLength})
+	}
+	if m.MinLength != 0 {
+		info = append(info, yaml.MapItem{Key: "minLength", Value: m.MinLength})
+	}
+	if m.Pattern != "" {
+		info = append(info, yaml.MapItem{Key: "pattern", Value: m.Pattern})
+	}
+	if m.MaxItems != 0 {
+		info = append(info, yaml.MapItem{Key: "maxItems", Value: m.MaxItems})
+	}
+	if m.MinItems != 0 {
+		info = append(info, yaml.MapItem{Key: "minItems", Value: m.MinItems})
+	}
+	if m.UniqueItems != false {
+		info = append(info, yaml.MapItem{Key: "uniqueItems", Value: m.UniqueItems})
+	}
+	if m.MaxProperties != 0 {
+		info = append(info, yaml.MapItem{Key: "maxProperties", Value: m.MaxProperties})
+	}
+	if m.MinProperties != 0 {
+		info = append(info, yaml.MapItem{Key: "minProperties", Value: m.MinProperties})
+	}
+	if len(m.Required) != 0 {
+		info = append(info, yaml.MapItem{Key: "required", Value: m.Required})
+	}
+	if len(m.Enum) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.Enum {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "enum", Value: items})
+	}
+	// &{Name:enum Type:Any StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.AdditionalProperties != nil {
+		info = append(info, yaml.MapItem{Key: "additionalProperties", Value: m.AdditionalProperties.ToRawInfo()})
+	}
+	// &{Name:additionalProperties Type:AdditionalPropertiesItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Type != nil {
+		if len(m.Type.Value) == 1 {
+			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value[0]})
+		} else {
+			info = append(info, yaml.MapItem{Key: "type", Value: m.Type.Value})
+		}
+	}
+	// &{Name:type Type:TypeItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Items != nil {
+		items := make([]interface{}, 0)
+		for _, item := range m.Items.Schema {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "items", Value: items[0]})
+	}
+	// &{Name:items Type:ItemsItem StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if len(m.AllOf) != 0 {
+		items := make([]interface{}, 0)
+		for _, item := range m.AllOf {
+			items = append(items, item.ToRawInfo())
+		}
+		info = append(info, yaml.MapItem{Key: "allOf", Value: items})
+	}
+	// &{Name:allOf Type:Schema StringEnumValues:[] MapType: Repeated:true Pattern: Implicit:false Description:}
+	if m.Properties != nil {
+		info = append(info, yaml.MapItem{Key: "properties", Value: m.Properties.ToRawInfo()})
+	}
+	// &{Name:properties Type:Properties StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Discriminator != "" {
+		info = append(info, yaml.MapItem{Key: "discriminator", Value: m.Discriminator})
+	}
+	if m.ReadOnly != false {
+		info = append(info, yaml.MapItem{Key: "readOnly", Value: m.ReadOnly})
+	}
+	if m.Xml != nil {
+		info = append(info, yaml.MapItem{Key: "xml", Value: m.Xml.ToRawInfo()})
+	}
+	// &{Name:xml Type:Xml StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.ExternalDocs != nil {
+		info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()})
+	}
+	// &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.Example != nil {
+		info = append(info, yaml.MapItem{Key: "example", Value: m.Example.ToRawInfo()})
+	}
+	// &{Name:example Type:Any StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	if m.VendorExtension != nil {
+		for _, item := range m.VendorExtension {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of SchemaItem suitable for JSON or YAML export.
+func (m *SchemaItem) ToRawInfo() interface{} {
+	// ONE OF WRAPPER
+	// SchemaItem
+	// {Name:schema Type:Schema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v0 := m.GetSchema()
+	if v0 != nil {
+		return v0.ToRawInfo()
+	}
+	// {Name:fileSchema Type:FileSchema StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:}
+	v1 := m.GetFileSchema()
+	if v1 != nil {
+		return v1.ToRawInfo()
+	}
+	return nil
+}
+
+// ToRawInfo returns a description of SecurityDefinitions suitable for JSON or YAML export.
+func (m *SecurityDefinitions) ToRawInfo() interface{} {
+	info := yaml.MapSlice{}
+	if m == nil {
+		return info
+	}
+	if m.AdditionalProperties != nil {
+		for _, item := range m.AdditionalProperties {
+			info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()})
+		}
+	}
+	// &{Name:additionalProperties Type:NamedSecurityDefinitionsItem StringEnumValues:[] MapType:SecurityDefinitionsItem Repeated:true Pattern: Implicit:true Description:}
+	return info
+}
+
+// ToRawInfo returns a description of SecurityDefinitionsItem suitable for JSON or YAML export.
+func (m *SecurityDefinitionsItem) ToRawInfo() interface{} { + // ONE OF WRAPPER + // SecurityDefinitionsItem + // {Name:basicAuthenticationSecurity Type:BasicAuthenticationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v0 := m.GetBasicAuthenticationSecurity() + if v0 != nil { + return v0.ToRawInfo() + } + // {Name:apiKeySecurity Type:ApiKeySecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v1 := m.GetApiKeySecurity() + if v1 != nil { + return v1.ToRawInfo() + } + // {Name:oauth2ImplicitSecurity Type:Oauth2ImplicitSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v2 := m.GetOauth2ImplicitSecurity() + if v2 != nil { + return v2.ToRawInfo() + } + // {Name:oauth2PasswordSecurity Type:Oauth2PasswordSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v3 := m.GetOauth2PasswordSecurity() + if v3 != nil { + return v3.ToRawInfo() + } + // {Name:oauth2ApplicationSecurity Type:Oauth2ApplicationSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v4 := m.GetOauth2ApplicationSecurity() + if v4 != nil { + return v4.ToRawInfo() + } + // {Name:oauth2AccessCodeSecurity Type:Oauth2AccessCodeSecurity StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + v5 := m.GetOauth2AccessCodeSecurity() + if v5 != nil { + return v5.ToRawInfo() + } + return nil +} + +// ToRawInfo returns a description of SecurityRequirement suitable for JSON or YAML export. +func (m *SecurityRequirement) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedStringArray StringEnumValues:[] MapType:StringArray Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of StringArray suitable for JSON or YAML export. +func (m *StringArray) ToRawInfo() interface{} { + return m.Value +} + +// ToRawInfo returns a description of Tag suitable for JSON or YAML export. +func (m *Tag) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + // always include this required field. + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + if m.Description != "" { + info = append(info, yaml.MapItem{Key: "description", Value: m.Description}) + } + if m.ExternalDocs != nil { + info = append(info, yaml.MapItem{Key: "externalDocs", Value: m.ExternalDocs.ToRawInfo()}) + } + // &{Name:externalDocs Type:ExternalDocs StringEnumValues:[] MapType: Repeated:false Pattern: Implicit:false Description:} + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of TypeItem suitable for JSON or YAML export. +func (m *TypeItem) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if len(m.Value) != 0 { + info = append(info, yaml.MapItem{Key: "value", Value: m.Value}) + } + return info +} + +// ToRawInfo returns a description of VendorExtension suitable for JSON or YAML export. 
+func (m *VendorExtension) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.AdditionalProperties != nil { + for _, item := range m.AdditionalProperties { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:additionalProperties Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern: Implicit:true Description:} + return info +} + +// ToRawInfo returns a description of Xml suitable for JSON or YAML export. +func (m *Xml) ToRawInfo() interface{} { + info := yaml.MapSlice{} + if m == nil { + return info + } + if m.Name != "" { + info = append(info, yaml.MapItem{Key: "name", Value: m.Name}) + } + if m.Namespace != "" { + info = append(info, yaml.MapItem{Key: "namespace", Value: m.Namespace}) + } + if m.Prefix != "" { + info = append(info, yaml.MapItem{Key: "prefix", Value: m.Prefix}) + } + if m.Attribute != false { + info = append(info, yaml.MapItem{Key: "attribute", Value: m.Attribute}) + } + if m.Wrapped != false { + info = append(info, yaml.MapItem{Key: "wrapped", Value: m.Wrapped}) + } + if m.VendorExtension != nil { + for _, item := range m.VendorExtension { + info = append(info, yaml.MapItem{Key: item.Name, Value: item.Value.ToRawInfo()}) + } + } + // &{Name:VendorExtension Type:NamedAny StringEnumValues:[] MapType:Any Repeated:true Pattern:^x- Implicit:true Description:} + return info +} + +var ( + pattern0 = regexp.MustCompile("^x-") + pattern1 = regexp.MustCompile("^/") + pattern2 = regexp.MustCompile("^([0-9]{3})$|^(default)$") +) diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go new file mode 100644 index 000000000000..a030fa67653d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.pb.go @@ -0,0 +1,4455 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: OpenAPIv2/OpenAPIv2.proto + +/* +Package openapi_v2 is a generated protocol buffer package. + +It is generated from these files: + OpenAPIv2/OpenAPIv2.proto + +It has these top-level messages: + AdditionalPropertiesItem + Any + ApiKeySecurity + BasicAuthenticationSecurity + BodyParameter + Contact + Default + Definitions + Document + Examples + ExternalDocs + FileSchema + FormDataParameterSubSchema + Header + HeaderParameterSubSchema + Headers + Info + ItemsItem + JsonReference + License + NamedAny + NamedHeader + NamedParameter + NamedPathItem + NamedResponse + NamedResponseValue + NamedSchema + NamedSecurityDefinitionsItem + NamedString + NamedStringArray + NonBodyParameter + Oauth2AccessCodeSecurity + Oauth2ApplicationSecurity + Oauth2ImplicitSecurity + Oauth2PasswordSecurity + Oauth2Scopes + Operation + Parameter + ParameterDefinitions + ParametersItem + PathItem + PathParameterSubSchema + Paths + PrimitivesItems + Properties + QueryParameterSubSchema + Response + ResponseDefinitions + ResponseValue + Responses + Schema + SchemaItem + SecurityDefinitions + SecurityDefinitionsItem + SecurityRequirement + StringArray + Tag + TypeItem + VendorExtension + Xml +*/ +package openapi_v2 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +type AdditionalPropertiesItem struct { + // Types that are valid to be assigned to Oneof: + // *AdditionalPropertiesItem_Schema + // *AdditionalPropertiesItem_Boolean + Oneof isAdditionalPropertiesItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *AdditionalPropertiesItem) Reset() { *m = AdditionalPropertiesItem{} } +func (m *AdditionalPropertiesItem) String() string { return proto.CompactTextString(m) } +func (*AdditionalPropertiesItem) ProtoMessage() {} +func (*AdditionalPropertiesItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAdditionalPropertiesItem_Oneof interface { + isAdditionalPropertiesItem_Oneof() +} + +type AdditionalPropertiesItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type AdditionalPropertiesItem_Boolean struct { + Boolean bool `protobuf:"varint,2,opt,name=boolean,oneof"` +} + +func (*AdditionalPropertiesItem_Schema) isAdditionalPropertiesItem_Oneof() {} +func (*AdditionalPropertiesItem_Boolean) isAdditionalPropertiesItem_Oneof() {} + +func (m *AdditionalPropertiesItem) GetOneof() isAdditionalPropertiesItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *AdditionalPropertiesItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *AdditionalPropertiesItem) GetBoolean() bool { + if x, ok := m.GetOneof().(*AdditionalPropertiesItem_Boolean); ok { + return x.Boolean + } + return false +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AdditionalPropertiesItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AdditionalPropertiesItem_OneofMarshaler, _AdditionalPropertiesItem_OneofUnmarshaler, _AdditionalPropertiesItem_OneofSizer, []interface{}{ + (*AdditionalPropertiesItem_Schema)(nil), + (*AdditionalPropertiesItem_Boolean)(nil), + } +} + +func _AdditionalPropertiesItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *AdditionalPropertiesItem_Boolean: + t := uint64(0) + if x.Boolean { + t = 1 + } + b.EncodeVarint(2<<3 | proto.WireVarint) + b.EncodeVarint(t) + case nil: + default: + return fmt.Errorf("AdditionalPropertiesItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _AdditionalPropertiesItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AdditionalPropertiesItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &AdditionalPropertiesItem_Schema{msg} + return true, err + case 2: // oneof.boolean + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Oneof = &AdditionalPropertiesItem_Boolean{x != 0} + return true, err + default: + return false, nil + } +} + +func _AdditionalPropertiesItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AdditionalPropertiesItem) + // oneof + switch x := m.Oneof.(type) { + case *AdditionalPropertiesItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AdditionalPropertiesItem_Boolean: + n += proto.SizeVarint(2<<3 | proto.WireVarint) + n += 1 + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Any struct { + Value *google_protobuf.Any `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` + Yaml string `protobuf:"bytes,2,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *Any) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +func (m *Any) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +type ApiKeySecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ApiKeySecurity) Reset() { *m = ApiKeySecurity{} } +func (m *ApiKeySecurity) String() string { return proto.CompactTextString(m) } +func (*ApiKeySecurity) ProtoMessage() {} +func (*ApiKeySecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} 
} + +func (m *ApiKeySecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ApiKeySecurity) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ApiKeySecurity) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *ApiKeySecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ApiKeySecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BasicAuthenticationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BasicAuthenticationSecurity) Reset() { *m = BasicAuthenticationSecurity{} } +func (m *BasicAuthenticationSecurity) String() string { return proto.CompactTextString(m) } +func (*BasicAuthenticationSecurity) ProtoMessage() {} +func (*BasicAuthenticationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *BasicAuthenticationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BasicAuthenticationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type BodyParameter struct { + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,3,opt,name=in" json:"in,omitempty"` + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,4,opt,name=required" json:"required,omitempty"` + Schema *Schema `protobuf:"bytes,5,opt,name=schema" json:"schema,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *BodyParameter) Reset() { *m = BodyParameter{} } +func (m *BodyParameter) String() string { return proto.CompactTextString(m) } +func (*BodyParameter) ProtoMessage() {} +func (*BodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *BodyParameter) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *BodyParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *BodyParameter) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *BodyParameter) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *BodyParameter) GetSchema() *Schema { + if m != nil { + return m.Schema + } + return nil +} + +func (m *BodyParameter) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Contact information for the owners of the API. +type Contact struct { + // The identifying name of the contact person/organization. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the contact information. + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + // The email address of the contact person/organization. + Email string `protobuf:"bytes,3,opt,name=email" json:"email,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Contact) Reset() { *m = Contact{} } +func (m *Contact) String() string { return proto.CompactTextString(m) } +func (*Contact) ProtoMessage() {} +func (*Contact) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *Contact) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Contact) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *Contact) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *Contact) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Default struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Default) Reset() { *m = Default{} } +func (m *Default) String() string { return proto.CompactTextString(m) } +func (*Default) ProtoMessage() {} +func (*Default) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *Default) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +type Definitions struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Definitions) Reset() { *m = Definitions{} } +func (m *Definitions) String() string { return proto.CompactTextString(m) } +func (*Definitions) ProtoMessage() {} +func (*Definitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *Definitions) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Document struct { + // The Swagger version of this document. + Swagger string `protobuf:"bytes,1,opt,name=swagger" json:"swagger,omitempty"` + Info *Info `protobuf:"bytes,2,opt,name=info" json:"info,omitempty"` + // The host (name or ip) of the API. Example: 'swagger.io' + Host string `protobuf:"bytes,3,opt,name=host" json:"host,omitempty"` + // The base path to the API. Example: '/api'. + BasePath string `protobuf:"bytes,4,opt,name=base_path,json=basePath" json:"base_path,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,5,rep,name=schemes" json:"schemes,omitempty"` + // A list of MIME types accepted by the API. + Consumes []string `protobuf:"bytes,6,rep,name=consumes" json:"consumes,omitempty"` + // A list of MIME types the API can produce. 
+ Produces []string `protobuf:"bytes,7,rep,name=produces" json:"produces,omitempty"` + Paths *Paths `protobuf:"bytes,8,opt,name=paths" json:"paths,omitempty"` + Definitions *Definitions `protobuf:"bytes,9,opt,name=definitions" json:"definitions,omitempty"` + Parameters *ParameterDefinitions `protobuf:"bytes,10,opt,name=parameters" json:"parameters,omitempty"` + Responses *ResponseDefinitions `protobuf:"bytes,11,opt,name=responses" json:"responses,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + SecurityDefinitions *SecurityDefinitions `protobuf:"bytes,13,opt,name=security_definitions,json=securityDefinitions" json:"security_definitions,omitempty"` + Tags []*Tag `protobuf:"bytes,14,rep,name=tags" json:"tags,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,15,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,16,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Document) Reset() { *m = Document{} } +func (m *Document) String() string { return proto.CompactTextString(m) } +func (*Document) ProtoMessage() {} +func (*Document) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *Document) GetSwagger() string { + if m != nil { + return m.Swagger + } + return "" +} + +func (m *Document) GetInfo() *Info { + if m != nil { + return m.Info + } + return nil +} + +func (m *Document) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Document) GetBasePath() string { + if m != nil { + return m.BasePath + } + return "" +} + +func (m *Document) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Document) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Document) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Document) GetPaths() *Paths { + if m != nil { + return m.Paths + } + return nil +} + +func (m *Document) GetDefinitions() *Definitions { + if m != nil { + return m.Definitions + } + return nil +} + +func (m *Document) GetParameters() *ParameterDefinitions { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Document) GetResponses() *ResponseDefinitions { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Document) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Document) GetSecurityDefinitions() *SecurityDefinitions { + if m != nil { + return m.SecurityDefinitions + } + return nil +} + +func (m *Document) GetTags() []*Tag { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Document) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Document) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Examples struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Examples) Reset() { *m = Examples{} } +func (m *Examples) String() string { return proto.CompactTextString(m) } +func (*Examples) ProtoMessage() {} +func (*Examples) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *Examples) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + 
return nil +} + +// information about external documentation +type ExternalDocs struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *ExternalDocs) Reset() { *m = ExternalDocs{} } +func (m *ExternalDocs) String() string { return proto.CompactTextString(m) } +func (*ExternalDocs) ProtoMessage() {} +func (*ExternalDocs) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ExternalDocs) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ExternalDocs) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *ExternalDocs) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type FileSchema struct { + Format string `protobuf:"bytes,1,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,2,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,4,opt,name=default" json:"default,omitempty"` + Required []string `protobuf:"bytes,5,rep,name=required" json:"required,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + ReadOnly bool `protobuf:"varint,7,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,8,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,9,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FileSchema) Reset() { *m = FileSchema{} } +func (m *FileSchema) String() string { return proto.CompactTextString(m) } +func (*FileSchema) ProtoMessage() {} +func (*FileSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *FileSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FileSchema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *FileSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FileSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FileSchema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *FileSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FileSchema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +func (m *FileSchema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *FileSchema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *FileSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type FormDataParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. 
+ In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *FormDataParameterSubSchema) Reset() { *m = FormDataParameterSubSchema{} } +func (m *FormDataParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*FormDataParameterSubSchema) ProtoMessage() {} +func (*FormDataParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *FormDataParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *FormDataParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *FormDataParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *FormDataParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *FormDataParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *FormDataParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *FormDataParameterSubSchema) 
GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *FormDataParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *FormDataParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *FormDataParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *FormDataParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *FormDataParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *FormDataParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *FormDataParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Header struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Description string `protobuf:"bytes,18,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,19,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *Header) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Header) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Header) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *Header) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *Header) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Header) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Header) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Header) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Header) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Header) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Header) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Header) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Header) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Header) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Header) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Header) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Header) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Header) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Header) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type HeaderParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *HeaderParameterSubSchema) Reset() { *m = HeaderParameterSubSchema{} } +func (m *HeaderParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*HeaderParameterSubSchema) ProtoMessage() {} +func (*HeaderParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *HeaderParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *HeaderParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *HeaderParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *HeaderParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *HeaderParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *HeaderParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *HeaderParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *HeaderParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMinimum() float64 { + if m != nil { + 
return m.Minimum + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *HeaderParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *HeaderParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *HeaderParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *HeaderParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *HeaderParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Headers struct { + AdditionalProperties []*NamedHeader `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Headers) Reset() { *m = Headers{} } +func (m *Headers) String() string { return proto.CompactTextString(m) } +func (*Headers) ProtoMessage() {} +func (*Headers) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *Headers) GetAdditionalProperties() []*NamedHeader { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +// General information about the API. +type Info struct { + // A unique and precise title of the API. + Title string `protobuf:"bytes,1,opt,name=title" json:"title,omitempty"` + // A semantic version number of the API. + Version string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"` + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The terms of service for the API. 
+ TermsOfService string `protobuf:"bytes,4,opt,name=terms_of_service,json=termsOfService" json:"terms_of_service,omitempty"` + Contact *Contact `protobuf:"bytes,5,opt,name=contact" json:"contact,omitempty"` + License *License `protobuf:"bytes,6,opt,name=license" json:"license,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Info) Reset() { *m = Info{} } +func (m *Info) String() string { return proto.CompactTextString(m) } +func (*Info) ProtoMessage() {} +func (*Info) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *Info) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Info) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Info) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Info) GetTermsOfService() string { + if m != nil { + return m.TermsOfService + } + return "" +} + +func (m *Info) GetContact() *Contact { + if m != nil { + return m.Contact + } + return nil +} + +func (m *Info) GetLicense() *License { + if m != nil { + return m.License + } + return nil +} + +func (m *Info) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type ItemsItem struct { + Schema []*Schema `protobuf:"bytes,1,rep,name=schema" json:"schema,omitempty"` +} + +func (m *ItemsItem) Reset() { *m = ItemsItem{} } +func (m *ItemsItem) String() string { return proto.CompactTextString(m) } +func (*ItemsItem) ProtoMessage() {} +func (*ItemsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ItemsItem) GetSchema() []*Schema { + if m != nil { + return m.Schema + } + return nil +} + +type JsonReference struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` +} + +func (m *JsonReference) Reset() { *m = JsonReference{} } +func (m *JsonReference) String() string { return proto.CompactTextString(m) } +func (*JsonReference) ProtoMessage() {} +func (*JsonReference) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *JsonReference) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *JsonReference) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +type License struct { + // The name of the license type. It's encouraged to use an OSI compatible license. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The URL pointing to the license. 
+ Url string `protobuf:"bytes,2,opt,name=url" json:"url,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,3,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *License) Reset() { *m = License{} } +func (m *License) String() string { return proto.CompactTextString(m) } +func (*License) ProtoMessage() {} +func (*License) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *License) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *License) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *License) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +type NamedAny struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedAny) Reset() { *m = NamedAny{} } +func (m *NamedAny) String() string { return proto.CompactTextString(m) } +func (*NamedAny) ProtoMessage() {} +func (*NamedAny) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } + +func (m *NamedAny) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedAny) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +type NamedHeader struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Header `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedHeader) Reset() { *m = NamedHeader{} } +func (m *NamedHeader) String() string { return proto.CompactTextString(m) } +func (*NamedHeader) ProtoMessage() {} +func (*NamedHeader) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } + +func (m *NamedHeader) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedHeader) GetValue() *Header { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +type NamedParameter struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Parameter `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedParameter) Reset() { *m = NamedParameter{} } +func (m *NamedParameter) String() string { return proto.CompactTextString(m) } +func (*NamedParameter) ProtoMessage() {} +func (*NamedParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } + +func (m *NamedParameter) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedParameter) GetValue() *Parameter { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+type NamedPathItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *PathItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedPathItem) Reset() { *m = NamedPathItem{} } +func (m *NamedPathItem) String() string { return proto.CompactTextString(m) } +func (*NamedPathItem) ProtoMessage() {} +func (*NamedPathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } + +func (m *NamedPathItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedPathItem) GetValue() *PathItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +type NamedResponse struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Response `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponse) Reset() { *m = NamedResponse{} } +func (m *NamedResponse) String() string { return proto.CompactTextString(m) } +func (*NamedResponse) ProtoMessage() {} +func (*NamedResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } + +func (m *NamedResponse) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponse) GetValue() *Response { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +type NamedResponseValue struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *ResponseValue `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedResponseValue) Reset() { *m = NamedResponseValue{} } +func (m *NamedResponseValue) String() string { return proto.CompactTextString(m) } +func (*NamedResponseValue) ProtoMessage() {} +func (*NamedResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } + +func (m *NamedResponseValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedResponseValue) GetValue() *ResponseValue { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +type NamedSchema struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *Schema `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSchema) Reset() { *m = NamedSchema{} } +func (m *NamedSchema) String() string { return proto.CompactTextString(m) } +func (*NamedSchema) ProtoMessage() {} +func (*NamedSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } + +func (m *NamedSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSchema) GetValue() *Schema { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. 
+type NamedSecurityDefinitionsItem struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *SecurityDefinitionsItem `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedSecurityDefinitionsItem) Reset() { *m = NamedSecurityDefinitionsItem{} } +func (m *NamedSecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*NamedSecurityDefinitionsItem) ProtoMessage() {} +func (*NamedSecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } + +func (m *NamedSecurityDefinitionsItem) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedSecurityDefinitionsItem) GetValue() *SecurityDefinitionsItem { + if m != nil { + return m.Value + } + return nil +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +type NamedString struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedString) Reset() { *m = NamedString{} } +func (m *NamedString) String() string { return proto.CompactTextString(m) } +func (*NamedString) ProtoMessage() {} +func (*NamedString) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } + +func (m *NamedString) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedString) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +type NamedStringArray struct { + // Map key + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Mapped value + Value *StringArray `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *NamedStringArray) Reset() { *m = NamedStringArray{} } +func (m *NamedStringArray) String() string { return proto.CompactTextString(m) } +func (*NamedStringArray) ProtoMessage() {} +func (*NamedStringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } + +func (m *NamedStringArray) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *NamedStringArray) GetValue() *StringArray { + if m != nil { + return m.Value + } + return nil +} + +type NonBodyParameter struct { + // Types that are valid to be assigned to Oneof: + // *NonBodyParameter_HeaderParameterSubSchema + // *NonBodyParameter_FormDataParameterSubSchema + // *NonBodyParameter_QueryParameterSubSchema + // *NonBodyParameter_PathParameterSubSchema + Oneof isNonBodyParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *NonBodyParameter) Reset() { *m = NonBodyParameter{} } +func (m *NonBodyParameter) String() string { return proto.CompactTextString(m) } +func (*NonBodyParameter) ProtoMessage() {} +func (*NonBodyParameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } + +type isNonBodyParameter_Oneof interface { + isNonBodyParameter_Oneof() +} + +type NonBodyParameter_HeaderParameterSubSchema struct { + HeaderParameterSubSchema *HeaderParameterSubSchema `protobuf:"bytes,1,opt,name=header_parameter_sub_schema,json=headerParameterSubSchema,oneof"` +} +type NonBodyParameter_FormDataParameterSubSchema struct { + FormDataParameterSubSchema *FormDataParameterSubSchema `protobuf:"bytes,2,opt,name=form_data_parameter_sub_schema,json=formDataParameterSubSchema,oneof"` +} +type 
NonBodyParameter_QueryParameterSubSchema struct { + QueryParameterSubSchema *QueryParameterSubSchema `protobuf:"bytes,3,opt,name=query_parameter_sub_schema,json=queryParameterSubSchema,oneof"` +} +type NonBodyParameter_PathParameterSubSchema struct { + PathParameterSubSchema *PathParameterSubSchema `protobuf:"bytes,4,opt,name=path_parameter_sub_schema,json=pathParameterSubSchema,oneof"` +} + +func (*NonBodyParameter_HeaderParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_FormDataParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_QueryParameterSubSchema) isNonBodyParameter_Oneof() {} +func (*NonBodyParameter_PathParameterSubSchema) isNonBodyParameter_Oneof() {} + +func (m *NonBodyParameter) GetOneof() isNonBodyParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *NonBodyParameter) GetHeaderParameterSubSchema() *HeaderParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_HeaderParameterSubSchema); ok { + return x.HeaderParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetFormDataParameterSubSchema() *FormDataParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_FormDataParameterSubSchema); ok { + return x.FormDataParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetQueryParameterSubSchema() *QueryParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_QueryParameterSubSchema); ok { + return x.QueryParameterSubSchema + } + return nil +} + +func (m *NonBodyParameter) GetPathParameterSubSchema() *PathParameterSubSchema { + if x, ok := m.GetOneof().(*NonBodyParameter_PathParameterSubSchema); ok { + return x.PathParameterSubSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*NonBodyParameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _NonBodyParameter_OneofMarshaler, _NonBodyParameter_OneofUnmarshaler, _NonBodyParameter_OneofSizer, []interface{}{ + (*NonBodyParameter_HeaderParameterSubSchema)(nil), + (*NonBodyParameter_FormDataParameterSubSchema)(nil), + (*NonBodyParameter_QueryParameterSubSchema)(nil), + (*NonBodyParameter_PathParameterSubSchema)(nil), + } +} + +func _NonBodyParameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HeaderParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_FormDataParameterSubSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FormDataParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_QueryParameterSubSchema: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryParameterSubSchema); err != nil { + return err + } + case *NonBodyParameter_PathParameterSubSchema: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PathParameterSubSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("NonBodyParameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _NonBodyParameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*NonBodyParameter) + switch tag { + case 1: // oneof.header_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HeaderParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_HeaderParameterSubSchema{msg} + return true, err + case 2: // oneof.form_data_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FormDataParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_FormDataParameterSubSchema{msg} + return true, err + case 3: // oneof.query_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_QueryParameterSubSchema{msg} + return true, err + case 4: // oneof.path_parameter_sub_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PathParameterSubSchema) + err := b.DecodeMessage(msg) + m.Oneof = &NonBodyParameter_PathParameterSubSchema{msg} + return true, err + default: + return false, nil + } +} + +func _NonBodyParameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*NonBodyParameter) + // oneof + switch x := m.Oneof.(type) { + case *NonBodyParameter_HeaderParameterSubSchema: + s := proto.Size(x.HeaderParameterSubSchema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_FormDataParameterSubSchema: + s := proto.Size(x.FormDataParameterSubSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *NonBodyParameter_QueryParameterSubSchema: + s := proto.Size(x.QueryParameterSubSchema) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*NonBodyParameter_PathParameterSubSchema: + s := proto.Size(x.PathParameterSubSchema) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Oauth2AccessCodeSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + TokenUrl string `protobuf:"bytes,5,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,6,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,7,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2AccessCodeSecurity) Reset() { *m = Oauth2AccessCodeSecurity{} } +func (m *Oauth2AccessCodeSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2AccessCodeSecurity) ProtoMessage() {} +func (*Oauth2AccessCodeSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } + +func (m *Oauth2AccessCodeSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2AccessCodeSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2AccessCodeSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ApplicationSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ApplicationSecurity) Reset() { *m = Oauth2ApplicationSecurity{} } +func (m *Oauth2ApplicationSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ApplicationSecurity) ProtoMessage() {} +func (*Oauth2ApplicationSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } + +func (m *Oauth2ApplicationSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ApplicationSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} 
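
The NonBodyParameter message above keeps exactly one of four sub-schema variants behind the isNonBodyParameter_Oneof interface, and each Get* accessor uses a type assertion so the non-matching variants simply return nil. The following is an illustrative sketch (not part of the generated file) of how calling code would set and read that oneof, assuming it compiles in the same package as this generated code; the function name is made up for illustration.

// exampleQueryParameter is an illustrative sketch (not generated code): it
// stores a QueryParameterSubSchema in the oneof and shows that only the
// matching accessor returns a non-nil value.
func exampleQueryParameter() *QueryParameterSubSchema {
	p := &NonBodyParameter{
		Oneof: &NonBodyParameter_QueryParameterSubSchema{
			QueryParameterSubSchema: &QueryParameterSubSchema{Name: "limit", In: "query", Type: "integer"},
		},
	}
	if p.GetPathParameterSubSchema() != nil {
		// Never reached here: the oneof holds the query variant, so the
		// path accessor's type assertion fails and it returns nil.
		return nil
	}
	return p.GetQueryParameterSubSchema() // non-nil: the stored variant
}
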
+ +func (m *Oauth2ApplicationSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ApplicationSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2ImplicitSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + AuthorizationUrl string `protobuf:"bytes,4,opt,name=authorization_url,json=authorizationUrl" json:"authorization_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2ImplicitSecurity) Reset() { *m = Oauth2ImplicitSecurity{} } +func (m *Oauth2ImplicitSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2ImplicitSecurity) ProtoMessage() {} +func (*Oauth2ImplicitSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } + +func (m *Oauth2ImplicitSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2ImplicitSecurity) GetAuthorizationUrl() string { + if m != nil { + return m.AuthorizationUrl + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2ImplicitSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2PasswordSecurity struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Flow string `protobuf:"bytes,2,opt,name=flow" json:"flow,omitempty"` + Scopes *Oauth2Scopes `protobuf:"bytes,3,opt,name=scopes" json:"scopes,omitempty"` + TokenUrl string `protobuf:"bytes,4,opt,name=token_url,json=tokenUrl" json:"token_url,omitempty"` + Description string `protobuf:"bytes,5,opt,name=description" json:"description,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Oauth2PasswordSecurity) Reset() { *m = Oauth2PasswordSecurity{} } +func (m *Oauth2PasswordSecurity) String() string { return proto.CompactTextString(m) } +func (*Oauth2PasswordSecurity) ProtoMessage() {} +func (*Oauth2PasswordSecurity) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } + +func (m *Oauth2PasswordSecurity) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetFlow() string { + if m != nil { + return m.Flow + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetScopes() *Oauth2Scopes { + if m != nil { + return m.Scopes + } + return nil +} + +func (m *Oauth2PasswordSecurity) GetTokenUrl() string { + if m != nil { + return m.TokenUrl + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Oauth2PasswordSecurity) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Oauth2Scopes struct { + 
AdditionalProperties []*NamedString `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Oauth2Scopes) Reset() { *m = Oauth2Scopes{} } +func (m *Oauth2Scopes) String() string { return proto.CompactTextString(m) } +func (*Oauth2Scopes) ProtoMessage() {} +func (*Oauth2Scopes) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } + +func (m *Oauth2Scopes) GetAdditionalProperties() []*NamedString { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Operation struct { + Tags []string `protobuf:"bytes,1,rep,name=tags" json:"tags,omitempty"` + // A brief summary of the operation. + Summary string `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"` + // A longer description of the operation, GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,4,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + // A unique identifier of the operation. + OperationId string `protobuf:"bytes,5,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // A list of MIME types the API can produce. + Produces []string `protobuf:"bytes,6,rep,name=produces" json:"produces,omitempty"` + // A list of MIME types the API can consume. + Consumes []string `protobuf:"bytes,7,rep,name=consumes" json:"consumes,omitempty"` + // The parameters needed to send a valid API call. + Parameters []*ParametersItem `protobuf:"bytes,8,rep,name=parameters" json:"parameters,omitempty"` + Responses *Responses `protobuf:"bytes,9,opt,name=responses" json:"responses,omitempty"` + // The transfer protocol of the API. + Schemes []string `protobuf:"bytes,10,rep,name=schemes" json:"schemes,omitempty"` + Deprecated bool `protobuf:"varint,11,opt,name=deprecated" json:"deprecated,omitempty"` + Security []*SecurityRequirement `protobuf:"bytes,12,rep,name=security" json:"security,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,13,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Operation) Reset() { *m = Operation{} } +func (m *Operation) String() string { return proto.CompactTextString(m) } +func (*Operation) ProtoMessage() {} +func (*Operation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } + +func (m *Operation) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *Operation) GetSummary() string { + if m != nil { + return m.Summary + } + return "" +} + +func (m *Operation) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Operation) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Operation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *Operation) GetProduces() []string { + if m != nil { + return m.Produces + } + return nil +} + +func (m *Operation) GetConsumes() []string { + if m != nil { + return m.Consumes + } + return nil +} + +func (m *Operation) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *Operation) GetResponses() *Responses { + if m != nil { + return m.Responses + } + return nil +} + +func (m *Operation) GetSchemes() []string { + if m != nil { + return m.Schemes + } + return nil +} + +func (m *Operation) GetDeprecated() bool { + if m != nil { + 
return m.Deprecated + } + return false +} + +func (m *Operation) GetSecurity() []*SecurityRequirement { + if m != nil { + return m.Security + } + return nil +} + +func (m *Operation) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Parameter struct { + // Types that are valid to be assigned to Oneof: + // *Parameter_BodyParameter + // *Parameter_NonBodyParameter + Oneof isParameter_Oneof `protobuf_oneof:"oneof"` +} + +func (m *Parameter) Reset() { *m = Parameter{} } +func (m *Parameter) String() string { return proto.CompactTextString(m) } +func (*Parameter) ProtoMessage() {} +func (*Parameter) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } + +type isParameter_Oneof interface { + isParameter_Oneof() +} + +type Parameter_BodyParameter struct { + BodyParameter *BodyParameter `protobuf:"bytes,1,opt,name=body_parameter,json=bodyParameter,oneof"` +} +type Parameter_NonBodyParameter struct { + NonBodyParameter *NonBodyParameter `protobuf:"bytes,2,opt,name=non_body_parameter,json=nonBodyParameter,oneof"` +} + +func (*Parameter_BodyParameter) isParameter_Oneof() {} +func (*Parameter_NonBodyParameter) isParameter_Oneof() {} + +func (m *Parameter) GetOneof() isParameter_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *Parameter) GetBodyParameter() *BodyParameter { + if x, ok := m.GetOneof().(*Parameter_BodyParameter); ok { + return x.BodyParameter + } + return nil +} + +func (m *Parameter) GetNonBodyParameter() *NonBodyParameter { + if x, ok := m.GetOneof().(*Parameter_NonBodyParameter); ok { + return x.NonBodyParameter + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Parameter) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Parameter_OneofMarshaler, _Parameter_OneofUnmarshaler, _Parameter_OneofSizer, []interface{}{ + (*Parameter_BodyParameter)(nil), + (*Parameter_NonBodyParameter)(nil), + } +} + +func _Parameter_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BodyParameter); err != nil { + return err + } + case *Parameter_NonBodyParameter: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.NonBodyParameter); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Parameter.Oneof has unexpected type %T", x) + } + return nil +} + +func _Parameter_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Parameter) + switch tag { + case 1: // oneof.body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_BodyParameter{msg} + return true, err + case 2: // oneof.non_body_parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(NonBodyParameter) + err := b.DecodeMessage(msg) + m.Oneof = &Parameter_NonBodyParameter{msg} + return true, err + default: + return false, nil + } +} + +func _Parameter_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Parameter) + // oneof + switch x := m.Oneof.(type) { + case *Parameter_BodyParameter: + s := proto.Size(x.BodyParameter) + n += proto.SizeVarint(1<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Parameter_NonBodyParameter: + s := proto.Size(x.NonBodyParameter) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// One or more JSON representations for parameters +type ParameterDefinitions struct { + AdditionalProperties []*NamedParameter `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ParameterDefinitions) Reset() { *m = ParameterDefinitions{} } +func (m *ParameterDefinitions) String() string { return proto.CompactTextString(m) } +func (*ParameterDefinitions) ProtoMessage() {} +func (*ParameterDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } + +func (m *ParameterDefinitions) GetAdditionalProperties() []*NamedParameter { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ParametersItem struct { + // Types that are valid to be assigned to Oneof: + // *ParametersItem_Parameter + // *ParametersItem_JsonReference + Oneof isParametersItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ParametersItem) Reset() { *m = ParametersItem{} } +func (m *ParametersItem) String() string { return proto.CompactTextString(m) } +func (*ParametersItem) ProtoMessage() {} +func (*ParametersItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } + +type isParametersItem_Oneof interface { + isParametersItem_Oneof() +} + +type ParametersItem_Parameter struct { + Parameter *Parameter `protobuf:"bytes,1,opt,name=parameter,oneof"` +} +type ParametersItem_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ParametersItem_Parameter) isParametersItem_Oneof() {} +func (*ParametersItem_JsonReference) isParametersItem_Oneof() {} + +func (m *ParametersItem) GetOneof() isParametersItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ParametersItem) GetParameter() *Parameter { + if x, ok := m.GetOneof().(*ParametersItem_Parameter); ok { + return x.Parameter + } + return nil +} + +func (m *ParametersItem) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ParametersItem_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ParametersItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ParametersItem_OneofMarshaler, _ParametersItem_OneofUnmarshaler, _ParametersItem_OneofSizer, []interface{}{ + (*ParametersItem_Parameter)(nil), + (*ParametersItem_JsonReference)(nil), + } +} + +func _ParametersItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Parameter); err != nil { + return err + } + case *ParametersItem_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ParametersItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _ParametersItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ParametersItem) + switch tag { + case 1: // oneof.parameter + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Parameter) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_Parameter{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ParametersItem_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ParametersItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ParametersItem) + // oneof + switch x := m.Oneof.(type) { + case *ParametersItem_Parameter: + s := proto.Size(x.Parameter) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ParametersItem_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type PathItem struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Get *Operation `protobuf:"bytes,2,opt,name=get" json:"get,omitempty"` + Put *Operation `protobuf:"bytes,3,opt,name=put" json:"put,omitempty"` + Post *Operation `protobuf:"bytes,4,opt,name=post" json:"post,omitempty"` + Delete *Operation `protobuf:"bytes,5,opt,name=delete" json:"delete,omitempty"` + Options *Operation `protobuf:"bytes,6,opt,name=options" json:"options,omitempty"` + Head *Operation `protobuf:"bytes,7,opt,name=head" json:"head,omitempty"` + Patch *Operation `protobuf:"bytes,8,opt,name=patch" json:"patch,omitempty"` + // The parameters needed to send a valid API call. 
+ Parameters []*ParametersItem `protobuf:"bytes,9,rep,name=parameters" json:"parameters,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,10,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathItem) Reset() { *m = PathItem{} } +func (m *PathItem) String() string { return proto.CompactTextString(m) } +func (*PathItem) ProtoMessage() {} +func (*PathItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } + +func (m *PathItem) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *PathItem) GetGet() *Operation { + if m != nil { + return m.Get + } + return nil +} + +func (m *PathItem) GetPut() *Operation { + if m != nil { + return m.Put + } + return nil +} + +func (m *PathItem) GetPost() *Operation { + if m != nil { + return m.Post + } + return nil +} + +func (m *PathItem) GetDelete() *Operation { + if m != nil { + return m.Delete + } + return nil +} + +func (m *PathItem) GetOptions() *Operation { + if m != nil { + return m.Options + } + return nil +} + +func (m *PathItem) GetHead() *Operation { + if m != nil { + return m.Head + } + return nil +} + +func (m *PathItem) GetPatch() *Operation { + if m != nil { + return m.Patch + } + return nil +} + +func (m *PathItem) GetParameters() []*ParametersItem { + if m != nil { + return m.Parameters + } + return nil +} + +func (m *PathItem) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type PathParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. 
+ Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + Type string `protobuf:"bytes,5,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,6,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,7,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,8,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,9,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,10,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,11,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,12,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,13,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,14,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,15,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,16,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,17,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,18,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,19,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,21,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,22,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PathParameterSubSchema) Reset() { *m = PathParameterSubSchema{} } +func (m *PathParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*PathParameterSubSchema) ProtoMessage() {} +func (*PathParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } + +func (m *PathParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *PathParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *PathParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PathParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PathParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PathParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PathParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PathParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PathParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PathParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PathParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PathParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + 
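
Every generated accessor on PathParameterSubSchema (and the other messages in this file) guards against a nil receiver and returns the field's zero value, so callers can read optional data without explicit nil checks. A minimal sketch under the same same-package assumption; the function name and field values are illustrative only.

// examplePathParameter is an illustrative sketch (not generated code): it
// builds a typical path parameter and shows that the generated getters are
// safe to call on a nil receiver, returning zero values instead of panicking.
func examplePathParameter() {
	id := &PathParameterSubSchema{
		Required: true, // path parameters are always required in OpenAPI v2
		In:       "path",
		Name:     "id",
		Type:     "string",
	}
	_ = id.GetName()     // "id"
	_ = id.GetRequired() // true

	var missing *PathParameterSubSchema // nil receiver
	_ = missing.GetName()               // "" — the getter checks m != nil first
	_ = missing.GetEnum()               // nil slice, same guard
}
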
+func (m *PathParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PathParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PathParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PathParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PathParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PathParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PathParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PathParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +type Paths struct { + VendorExtension []*NamedAny `protobuf:"bytes,1,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` + Path []*NamedPathItem `protobuf:"bytes,2,rep,name=path" json:"path,omitempty"` +} + +func (m *Paths) Reset() { *m = Paths{} } +func (m *Paths) String() string { return proto.CompactTextString(m) } +func (*Paths) ProtoMessage() {} +func (*Paths) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } + +func (m *Paths) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func (m *Paths) GetPath() []*NamedPathItem { + if m != nil { + return m.Path + } + return nil +} + +type PrimitivesItems struct { + Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,3,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,4,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,6,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,7,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,8,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,9,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,10,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,11,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,12,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,13,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,14,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,15,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,16,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 
`protobuf:"fixed64,17,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,18,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *PrimitivesItems) Reset() { *m = PrimitivesItems{} } +func (m *PrimitivesItems) String() string { return proto.CompactTextString(m) } +func (*PrimitivesItems) ProtoMessage() {} +func (*PrimitivesItems) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{43} } + +func (m *PrimitivesItems) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *PrimitivesItems) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *PrimitivesItems) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *PrimitivesItems) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + +func (m *PrimitivesItems) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *PrimitivesItems) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *PrimitivesItems) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *PrimitivesItems) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *PrimitivesItems) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *PrimitivesItems) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *PrimitivesItems) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *PrimitivesItems) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *PrimitivesItems) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *PrimitivesItems) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *PrimitivesItems) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *PrimitivesItems) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *PrimitivesItems) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Properties struct { + AdditionalProperties []*NamedSchema `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *Properties) Reset() { *m = Properties{} } +func (m *Properties) String() string { return proto.CompactTextString(m) } +func (*Properties) ProtoMessage() {} +func (*Properties) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{44} } + +func (m *Properties) GetAdditionalProperties() []*NamedSchema { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type QueryParameterSubSchema struct { + // Determines whether or not this parameter is required or optional. + Required bool `protobuf:"varint,1,opt,name=required" json:"required,omitempty"` + // Determines the location of the parameter. + In string `protobuf:"bytes,2,opt,name=in" json:"in,omitempty"` + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. 
+ Description string `protobuf:"bytes,3,opt,name=description" json:"description,omitempty"` + // The name of the parameter. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // allows sending a parameter by name only or with an empty value. + AllowEmptyValue bool `protobuf:"varint,5,opt,name=allow_empty_value,json=allowEmptyValue" json:"allow_empty_value,omitempty"` + Type string `protobuf:"bytes,6,opt,name=type" json:"type,omitempty"` + Format string `protobuf:"bytes,7,opt,name=format" json:"format,omitempty"` + Items *PrimitivesItems `protobuf:"bytes,8,opt,name=items" json:"items,omitempty"` + CollectionFormat string `protobuf:"bytes,9,opt,name=collection_format,json=collectionFormat" json:"collection_format,omitempty"` + Default *Any `protobuf:"bytes,10,opt,name=default" json:"default,omitempty"` + Maximum float64 `protobuf:"fixed64,11,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,12,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,13,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,14,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,15,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,16,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,17,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,18,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,19,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,20,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + Enum []*Any `protobuf:"bytes,21,rep,name=enum" json:"enum,omitempty"` + MultipleOf float64 `protobuf:"fixed64,22,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,23,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *QueryParameterSubSchema) Reset() { *m = QueryParameterSubSchema{} } +func (m *QueryParameterSubSchema) String() string { return proto.CompactTextString(m) } +func (*QueryParameterSubSchema) ProtoMessage() {} +func (*QueryParameterSubSchema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{45} } + +func (m *QueryParameterSubSchema) GetRequired() bool { + if m != nil { + return m.Required + } + return false +} + +func (m *QueryParameterSubSchema) GetIn() string { + if m != nil { + return m.In + } + return "" +} + +func (m *QueryParameterSubSchema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *QueryParameterSubSchema) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *QueryParameterSubSchema) GetAllowEmptyValue() bool { + if m != nil { + return m.AllowEmptyValue + } + return false +} + +func (m *QueryParameterSubSchema) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *QueryParameterSubSchema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *QueryParameterSubSchema) GetItems() *PrimitivesItems { + if m != nil { + return m.Items + } + return nil +} + +func (m *QueryParameterSubSchema) GetCollectionFormat() string { + if m != nil { + return m.CollectionFormat + } + return "" +} + 
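
Because a ParametersItem wraps a Parameter, which in turn wraps either a BodyParameter or a NonBodyParameter, reading a concrete sub-schema means walking several oneof levels. A sketch of that chain, again assuming the same package as this generated file; the function name is illustrative.

// exampleParameterChain is an illustrative sketch (not generated code): it
// nests a query sub-schema inside the Parameter / ParametersItem oneof
// wrappers and reads the name back, relying on the nil-safe getters at each
// level of the chain.
func exampleParameterChain() string {
	item := &ParametersItem{
		Oneof: &ParametersItem_Parameter{
			Parameter: &Parameter{
				Oneof: &Parameter_NonBodyParameter{
					NonBodyParameter: &NonBodyParameter{
						Oneof: &NonBodyParameter_QueryParameterSubSchema{
							QueryParameterSubSchema: &QueryParameterSubSchema{Name: "limit", In: "query"},
						},
					},
				},
			},
		},
	}
	// Each getter returns nil (or "") when its variant is not set, so the
	// chained call is safe even if an intermediate wrapper were missing.
	return item.GetParameter().GetNonBodyParameter().GetQueryParameterSubSchema().GetName()
}
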
+func (m *QueryParameterSubSchema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *QueryParameterSubSchema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *QueryParameterSubSchema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *QueryParameterSubSchema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *QueryParameterSubSchema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *QueryParameterSubSchema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *QueryParameterSubSchema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *QueryParameterSubSchema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *QueryParameterSubSchema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *QueryParameterSubSchema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *QueryParameterSubSchema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type Response struct { + Description string `protobuf:"bytes,1,opt,name=description" json:"description,omitempty"` + Schema *SchemaItem `protobuf:"bytes,2,opt,name=schema" json:"schema,omitempty"` + Headers *Headers `protobuf:"bytes,3,opt,name=headers" json:"headers,omitempty"` + Examples *Examples `protobuf:"bytes,4,opt,name=examples" json:"examples,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,5,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{46} } + +func (m *Response) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Response) GetSchema() *SchemaItem { + if m != nil { + return m.Schema + } + return nil +} + +func (m *Response) GetHeaders() *Headers { + if m != nil { + return m.Headers + } + return nil +} + +func (m *Response) GetExamples() *Examples { + if m != nil { + return m.Examples + } + return nil +} + +func (m *Response) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// One or more JSON representations for parameters +type ResponseDefinitions struct { + AdditionalProperties []*NamedResponse `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *ResponseDefinitions) Reset() { *m = ResponseDefinitions{} } +func (m *ResponseDefinitions) String() string { return proto.CompactTextString(m) } +func (*ResponseDefinitions) ProtoMessage() {} +func (*ResponseDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{47} } + +func (m *ResponseDefinitions) GetAdditionalProperties() 
[]*NamedResponse { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type ResponseValue struct { + // Types that are valid to be assigned to Oneof: + // *ResponseValue_Response + // *ResponseValue_JsonReference + Oneof isResponseValue_Oneof `protobuf_oneof:"oneof"` +} + +func (m *ResponseValue) Reset() { *m = ResponseValue{} } +func (m *ResponseValue) String() string { return proto.CompactTextString(m) } +func (*ResponseValue) ProtoMessage() {} +func (*ResponseValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{48} } + +type isResponseValue_Oneof interface { + isResponseValue_Oneof() +} + +type ResponseValue_Response struct { + Response *Response `protobuf:"bytes,1,opt,name=response,oneof"` +} +type ResponseValue_JsonReference struct { + JsonReference *JsonReference `protobuf:"bytes,2,opt,name=json_reference,json=jsonReference,oneof"` +} + +func (*ResponseValue_Response) isResponseValue_Oneof() {} +func (*ResponseValue_JsonReference) isResponseValue_Oneof() {} + +func (m *ResponseValue) GetOneof() isResponseValue_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *ResponseValue) GetResponse() *Response { + if x, ok := m.GetOneof().(*ResponseValue_Response); ok { + return x.Response + } + return nil +} + +func (m *ResponseValue) GetJsonReference() *JsonReference { + if x, ok := m.GetOneof().(*ResponseValue_JsonReference); ok { + return x.JsonReference + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*ResponseValue) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ResponseValue_OneofMarshaler, _ResponseValue_OneofUnmarshaler, _ResponseValue_OneofSizer, []interface{}{ + (*ResponseValue_Response)(nil), + (*ResponseValue_JsonReference)(nil), + } +} + +func _ResponseValue_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Response); err != nil { + return err + } + case *ResponseValue_JsonReference: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.JsonReference); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ResponseValue.Oneof has unexpected type %T", x) + } + return nil +} + +func _ResponseValue_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ResponseValue) + switch tag { + case 1: // oneof.response + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Response) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_Response{msg} + return true, err + case 2: // oneof.json_reference + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(JsonReference) + err := b.DecodeMessage(msg) + m.Oneof = &ResponseValue_JsonReference{msg} + return true, err + default: + return false, nil + } +} + +func _ResponseValue_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ResponseValue) + // oneof + switch x := m.Oneof.(type) { + case *ResponseValue_Response: + s := proto.Size(x.Response) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ResponseValue_JsonReference: + s := proto.Size(x.JsonReference) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Response objects names can either be any valid HTTP status code or 'default'. +type Responses struct { + ResponseCode []*NamedResponseValue `protobuf:"bytes,1,rep,name=response_code,json=responseCode" json:"response_code,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,2,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Responses) Reset() { *m = Responses{} } +func (m *Responses) String() string { return proto.CompactTextString(m) } +func (*Responses) ProtoMessage() {} +func (*Responses) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{49} } + +func (m *Responses) GetResponseCode() []*NamedResponseValue { + if m != nil { + return m.ResponseCode + } + return nil +} + +func (m *Responses) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +// A deterministic version of a JSON Schema object. +type Schema struct { + XRef string `protobuf:"bytes,1,opt,name=_ref,json=Ref" json:"_ref,omitempty"` + Format string `protobuf:"bytes,2,opt,name=format" json:"format,omitempty"` + Title string `protobuf:"bytes,3,opt,name=title" json:"title,omitempty"` + Description string `protobuf:"bytes,4,opt,name=description" json:"description,omitempty"` + Default *Any `protobuf:"bytes,5,opt,name=default" json:"default,omitempty"` + MultipleOf float64 `protobuf:"fixed64,6,opt,name=multiple_of,json=multipleOf" json:"multiple_of,omitempty"` + Maximum float64 `protobuf:"fixed64,7,opt,name=maximum" json:"maximum,omitempty"` + ExclusiveMaximum bool `protobuf:"varint,8,opt,name=exclusive_maximum,json=exclusiveMaximum" json:"exclusive_maximum,omitempty"` + Minimum float64 `protobuf:"fixed64,9,opt,name=minimum" json:"minimum,omitempty"` + ExclusiveMinimum bool `protobuf:"varint,10,opt,name=exclusive_minimum,json=exclusiveMinimum" json:"exclusive_minimum,omitempty"` + MaxLength int64 `protobuf:"varint,11,opt,name=max_length,json=maxLength" json:"max_length,omitempty"` + MinLength int64 `protobuf:"varint,12,opt,name=min_length,json=minLength" json:"min_length,omitempty"` + Pattern string `protobuf:"bytes,13,opt,name=pattern" json:"pattern,omitempty"` + MaxItems int64 `protobuf:"varint,14,opt,name=max_items,json=maxItems" json:"max_items,omitempty"` + MinItems int64 `protobuf:"varint,15,opt,name=min_items,json=minItems" json:"min_items,omitempty"` + UniqueItems bool `protobuf:"varint,16,opt,name=unique_items,json=uniqueItems" json:"unique_items,omitempty"` + MaxProperties int64 `protobuf:"varint,17,opt,name=max_properties,json=maxProperties" json:"max_properties,omitempty"` + MinProperties int64 `protobuf:"varint,18,opt,name=min_properties,json=minProperties" json:"min_properties,omitempty"` + Required []string `protobuf:"bytes,19,rep,name=required" json:"required,omitempty"` + Enum []*Any `protobuf:"bytes,20,rep,name=enum" json:"enum,omitempty"` + AdditionalProperties *AdditionalPropertiesItem `protobuf:"bytes,21,opt,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` + Type *TypeItem `protobuf:"bytes,22,opt,name=type" json:"type,omitempty"` + Items *ItemsItem `protobuf:"bytes,23,opt,name=items" json:"items,omitempty"` + AllOf []*Schema `protobuf:"bytes,24,rep,name=all_of,json=allOf" json:"all_of,omitempty"` + Properties *Properties `protobuf:"bytes,25,opt,name=properties" json:"properties,omitempty"` + Discriminator string 
`protobuf:"bytes,26,opt,name=discriminator" json:"discriminator,omitempty"` + ReadOnly bool `protobuf:"varint,27,opt,name=read_only,json=readOnly" json:"read_only,omitempty"` + Xml *Xml `protobuf:"bytes,28,opt,name=xml" json:"xml,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,29,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + Example *Any `protobuf:"bytes,30,opt,name=example" json:"example,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,31,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Schema) Reset() { *m = Schema{} } +func (m *Schema) String() string { return proto.CompactTextString(m) } +func (*Schema) ProtoMessage() {} +func (*Schema) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{50} } + +func (m *Schema) GetXRef() string { + if m != nil { + return m.XRef + } + return "" +} + +func (m *Schema) GetFormat() string { + if m != nil { + return m.Format + } + return "" +} + +func (m *Schema) GetTitle() string { + if m != nil { + return m.Title + } + return "" +} + +func (m *Schema) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Schema) GetDefault() *Any { + if m != nil { + return m.Default + } + return nil +} + +func (m *Schema) GetMultipleOf() float64 { + if m != nil { + return m.MultipleOf + } + return 0 +} + +func (m *Schema) GetMaximum() float64 { + if m != nil { + return m.Maximum + } + return 0 +} + +func (m *Schema) GetExclusiveMaximum() bool { + if m != nil { + return m.ExclusiveMaximum + } + return false +} + +func (m *Schema) GetMinimum() float64 { + if m != nil { + return m.Minimum + } + return 0 +} + +func (m *Schema) GetExclusiveMinimum() bool { + if m != nil { + return m.ExclusiveMinimum + } + return false +} + +func (m *Schema) GetMaxLength() int64 { + if m != nil { + return m.MaxLength + } + return 0 +} + +func (m *Schema) GetMinLength() int64 { + if m != nil { + return m.MinLength + } + return 0 +} + +func (m *Schema) GetPattern() string { + if m != nil { + return m.Pattern + } + return "" +} + +func (m *Schema) GetMaxItems() int64 { + if m != nil { + return m.MaxItems + } + return 0 +} + +func (m *Schema) GetMinItems() int64 { + if m != nil { + return m.MinItems + } + return 0 +} + +func (m *Schema) GetUniqueItems() bool { + if m != nil { + return m.UniqueItems + } + return false +} + +func (m *Schema) GetMaxProperties() int64 { + if m != nil { + return m.MaxProperties + } + return 0 +} + +func (m *Schema) GetMinProperties() int64 { + if m != nil { + return m.MinProperties + } + return 0 +} + +func (m *Schema) GetRequired() []string { + if m != nil { + return m.Required + } + return nil +} + +func (m *Schema) GetEnum() []*Any { + if m != nil { + return m.Enum + } + return nil +} + +func (m *Schema) GetAdditionalProperties() *AdditionalPropertiesItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +func (m *Schema) GetType() *TypeItem { + if m != nil { + return m.Type + } + return nil +} + +func (m *Schema) GetItems() *ItemsItem { + if m != nil { + return m.Items + } + return nil +} + +func (m *Schema) GetAllOf() []*Schema { + if m != nil { + return m.AllOf + } + return nil +} + +func (m *Schema) GetProperties() *Properties { + if m != nil { + return m.Properties + } + return nil +} + +func (m *Schema) GetDiscriminator() string { + if m != nil { + return m.Discriminator + } + return "" +} + +func (m *Schema) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + 
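
The Schema message mirrors the JSON Schema subset allowed by OpenAPI v2: mostly optional scalar constraints plus a few nested messages such as AllOf for composition. A small sketch under the same same-package assumption; the names and values are made up for illustration.

// exampleSchema is an illustrative sketch (not generated code): it assembles
// a small object schema, composes it via AllOf, and reads fields back through
// the generated zero-value-returning getters.
func exampleSchema() {
	pet := &Schema{
		Title:       "Pet",
		Description: "A pet in the store",
		Required:    []string{"name"},
	}
	composed := &Schema{
		AllOf: []*Schema{pet}, // composition via allOf
	}
	_ = pet.GetTitle()           // "Pet"
	_ = pet.GetRequired()        // []string{"name"}
	_ = len(composed.GetAllOf()) // 1
}
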
+func (m *Schema) GetXml() *Xml { + if m != nil { + return m.Xml + } + return nil +} + +func (m *Schema) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Schema) GetExample() *Any { + if m != nil { + return m.Example + } + return nil +} + +func (m *Schema) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type SchemaItem struct { + // Types that are valid to be assigned to Oneof: + // *SchemaItem_Schema + // *SchemaItem_FileSchema + Oneof isSchemaItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SchemaItem) Reset() { *m = SchemaItem{} } +func (m *SchemaItem) String() string { return proto.CompactTextString(m) } +func (*SchemaItem) ProtoMessage() {} +func (*SchemaItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{51} } + +type isSchemaItem_Oneof interface { + isSchemaItem_Oneof() +} + +type SchemaItem_Schema struct { + Schema *Schema `protobuf:"bytes,1,opt,name=schema,oneof"` +} +type SchemaItem_FileSchema struct { + FileSchema *FileSchema `protobuf:"bytes,2,opt,name=file_schema,json=fileSchema,oneof"` +} + +func (*SchemaItem_Schema) isSchemaItem_Oneof() {} +func (*SchemaItem_FileSchema) isSchemaItem_Oneof() {} + +func (m *SchemaItem) GetOneof() isSchemaItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SchemaItem) GetSchema() *Schema { + if x, ok := m.GetOneof().(*SchemaItem_Schema); ok { + return x.Schema + } + return nil +} + +func (m *SchemaItem) GetFileSchema() *FileSchema { + if x, ok := m.GetOneof().(*SchemaItem_FileSchema); ok { + return x.FileSchema + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SchemaItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SchemaItem_OneofMarshaler, _SchemaItem_OneofUnmarshaler, _SchemaItem_OneofSizer, []interface{}{ + (*SchemaItem_Schema)(nil), + (*SchemaItem_FileSchema)(nil), + } +} + +func _SchemaItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Schema); err != nil { + return err + } + case *SchemaItem_FileSchema: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FileSchema); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SchemaItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SchemaItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SchemaItem) + switch tag { + case 1: // oneof.schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Schema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_Schema{msg} + return true, err + case 2: // oneof.file_schema + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(FileSchema) + err := b.DecodeMessage(msg) + m.Oneof = &SchemaItem_FileSchema{msg} + return true, err + default: + return false, nil + } +} + +func _SchemaItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SchemaItem) + // oneof + switch x := m.Oneof.(type) { + case *SchemaItem_Schema: + s := proto.Size(x.Schema) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case 
*SchemaItem_FileSchema: + s := proto.Size(x.FileSchema) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityDefinitions struct { + AdditionalProperties []*NamedSecurityDefinitionsItem `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityDefinitions) Reset() { *m = SecurityDefinitions{} } +func (m *SecurityDefinitions) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitions) ProtoMessage() {} +func (*SecurityDefinitions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{52} } + +func (m *SecurityDefinitions) GetAdditionalProperties() []*NamedSecurityDefinitionsItem { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type SecurityDefinitionsItem struct { + // Types that are valid to be assigned to Oneof: + // *SecurityDefinitionsItem_BasicAuthenticationSecurity + // *SecurityDefinitionsItem_ApiKeySecurity + // *SecurityDefinitionsItem_Oauth2ImplicitSecurity + // *SecurityDefinitionsItem_Oauth2PasswordSecurity + // *SecurityDefinitionsItem_Oauth2ApplicationSecurity + // *SecurityDefinitionsItem_Oauth2AccessCodeSecurity + Oneof isSecurityDefinitionsItem_Oneof `protobuf_oneof:"oneof"` +} + +func (m *SecurityDefinitionsItem) Reset() { *m = SecurityDefinitionsItem{} } +func (m *SecurityDefinitionsItem) String() string { return proto.CompactTextString(m) } +func (*SecurityDefinitionsItem) ProtoMessage() {} +func (*SecurityDefinitionsItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{53} } + +type isSecurityDefinitionsItem_Oneof interface { + isSecurityDefinitionsItem_Oneof() +} + +type SecurityDefinitionsItem_BasicAuthenticationSecurity struct { + BasicAuthenticationSecurity *BasicAuthenticationSecurity `protobuf:"bytes,1,opt,name=basic_authentication_security,json=basicAuthenticationSecurity,oneof"` +} +type SecurityDefinitionsItem_ApiKeySecurity struct { + ApiKeySecurity *ApiKeySecurity `protobuf:"bytes,2,opt,name=api_key_security,json=apiKeySecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ImplicitSecurity struct { + Oauth2ImplicitSecurity *Oauth2ImplicitSecurity `protobuf:"bytes,3,opt,name=oauth2_implicit_security,json=oauth2ImplicitSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2PasswordSecurity struct { + Oauth2PasswordSecurity *Oauth2PasswordSecurity `protobuf:"bytes,4,opt,name=oauth2_password_security,json=oauth2PasswordSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2ApplicationSecurity struct { + Oauth2ApplicationSecurity *Oauth2ApplicationSecurity `protobuf:"bytes,5,opt,name=oauth2_application_security,json=oauth2ApplicationSecurity,oneof"` +} +type SecurityDefinitionsItem_Oauth2AccessCodeSecurity struct { + Oauth2AccessCodeSecurity *Oauth2AccessCodeSecurity `protobuf:"bytes,6,opt,name=oauth2_access_code_security,json=oauth2AccessCodeSecurity,oneof"` +} + +func (*SecurityDefinitionsItem_BasicAuthenticationSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_ApiKeySecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ImplicitSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2PasswordSecurity) isSecurityDefinitionsItem_Oneof() {} +func (*SecurityDefinitionsItem_Oauth2ApplicationSecurity) isSecurityDefinitionsItem_Oneof() {} +func 
(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity) isSecurityDefinitionsItem_Oneof() {} + +func (m *SecurityDefinitionsItem) GetOneof() isSecurityDefinitionsItem_Oneof { + if m != nil { + return m.Oneof + } + return nil +} + +func (m *SecurityDefinitionsItem) GetBasicAuthenticationSecurity() *BasicAuthenticationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_BasicAuthenticationSecurity); ok { + return x.BasicAuthenticationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetApiKeySecurity() *ApiKeySecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_ApiKeySecurity); ok { + return x.ApiKeySecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ImplicitSecurity() *Oauth2ImplicitSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ImplicitSecurity); ok { + return x.Oauth2ImplicitSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2PasswordSecurity() *Oauth2PasswordSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2PasswordSecurity); ok { + return x.Oauth2PasswordSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2ApplicationSecurity() *Oauth2ApplicationSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2ApplicationSecurity); ok { + return x.Oauth2ApplicationSecurity + } + return nil +} + +func (m *SecurityDefinitionsItem) GetOauth2AccessCodeSecurity() *Oauth2AccessCodeSecurity { + if x, ok := m.GetOneof().(*SecurityDefinitionsItem_Oauth2AccessCodeSecurity); ok { + return x.Oauth2AccessCodeSecurity + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*SecurityDefinitionsItem) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SecurityDefinitionsItem_OneofMarshaler, _SecurityDefinitionsItem_OneofUnmarshaler, _SecurityDefinitionsItem_OneofSizer, []interface{}{ + (*SecurityDefinitionsItem_BasicAuthenticationSecurity)(nil), + (*SecurityDefinitionsItem_ApiKeySecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ImplicitSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2PasswordSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2ApplicationSecurity)(nil), + (*SecurityDefinitionsItem_Oauth2AccessCodeSecurity)(nil), + } +} + +func _SecurityDefinitionsItem_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BasicAuthenticationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_ApiKeySecurity: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ApiKeySecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ImplicitSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2PasswordSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2ApplicationSecurity); err != nil { + return err + } + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + b.EncodeVarint(6<<3 | 
proto.WireBytes) + if err := b.EncodeMessage(x.Oauth2AccessCodeSecurity); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SecurityDefinitionsItem.Oneof has unexpected type %T", x) + } + return nil +} + +func _SecurityDefinitionsItem_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SecurityDefinitionsItem) + switch tag { + case 1: // oneof.basic_authentication_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(BasicAuthenticationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_BasicAuthenticationSecurity{msg} + return true, err + case 2: // oneof.api_key_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ApiKeySecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_ApiKeySecurity{msg} + return true, err + case 3: // oneof.oauth2_implicit_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ImplicitSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ImplicitSecurity{msg} + return true, err + case 4: // oneof.oauth2_password_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2PasswordSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2PasswordSecurity{msg} + return true, err + case 5: // oneof.oauth2_application_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2ApplicationSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2ApplicationSecurity{msg} + return true, err + case 6: // oneof.oauth2_access_code_security + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Oauth2AccessCodeSecurity) + err := b.DecodeMessage(msg) + m.Oneof = &SecurityDefinitionsItem_Oauth2AccessCodeSecurity{msg} + return true, err + default: + return false, nil + } +} + +func _SecurityDefinitionsItem_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SecurityDefinitionsItem) + // oneof + switch x := m.Oneof.(type) { + case *SecurityDefinitionsItem_BasicAuthenticationSecurity: + s := proto.Size(x.BasicAuthenticationSecurity) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_ApiKeySecurity: + s := proto.Size(x.ApiKeySecurity) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ImplicitSecurity: + s := proto.Size(x.Oauth2ImplicitSecurity) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2PasswordSecurity: + s := proto.Size(x.Oauth2PasswordSecurity) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2ApplicationSecurity: + s := proto.Size(x.Oauth2ApplicationSecurity) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *SecurityDefinitionsItem_Oauth2AccessCodeSecurity: + s := proto.Size(x.Oauth2AccessCodeSecurity) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type SecurityRequirement struct { + 
AdditionalProperties []*NamedStringArray `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *SecurityRequirement) Reset() { *m = SecurityRequirement{} } +func (m *SecurityRequirement) String() string { return proto.CompactTextString(m) } +func (*SecurityRequirement) ProtoMessage() {} +func (*SecurityRequirement) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{54} } + +func (m *SecurityRequirement) GetAdditionalProperties() []*NamedStringArray { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type StringArray struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *StringArray) Reset() { *m = StringArray{} } +func (m *StringArray) String() string { return proto.CompactTextString(m) } +func (*StringArray) ProtoMessage() {} +func (*StringArray) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{55} } + +func (m *StringArray) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +type Tag struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description" json:"description,omitempty"` + ExternalDocs *ExternalDocs `protobuf:"bytes,3,opt,name=external_docs,json=externalDocs" json:"external_docs,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,4,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{56} } + +func (m *Tag) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Tag) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *Tag) GetExternalDocs() *ExternalDocs { + if m != nil { + return m.ExternalDocs + } + return nil +} + +func (m *Tag) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +type TypeItem struct { + Value []string `protobuf:"bytes,1,rep,name=value" json:"value,omitempty"` +} + +func (m *TypeItem) Reset() { *m = TypeItem{} } +func (m *TypeItem) String() string { return proto.CompactTextString(m) } +func (*TypeItem) ProtoMessage() {} +func (*TypeItem) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{57} } + +func (m *TypeItem) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +// Any property starting with x- is valid. 
+type VendorExtension struct { + AdditionalProperties []*NamedAny `protobuf:"bytes,1,rep,name=additional_properties,json=additionalProperties" json:"additional_properties,omitempty"` +} + +func (m *VendorExtension) Reset() { *m = VendorExtension{} } +func (m *VendorExtension) String() string { return proto.CompactTextString(m) } +func (*VendorExtension) ProtoMessage() {} +func (*VendorExtension) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{58} } + +func (m *VendorExtension) GetAdditionalProperties() []*NamedAny { + if m != nil { + return m.AdditionalProperties + } + return nil +} + +type Xml struct { + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace" json:"namespace,omitempty"` + Prefix string `protobuf:"bytes,3,opt,name=prefix" json:"prefix,omitempty"` + Attribute bool `protobuf:"varint,4,opt,name=attribute" json:"attribute,omitempty"` + Wrapped bool `protobuf:"varint,5,opt,name=wrapped" json:"wrapped,omitempty"` + VendorExtension []*NamedAny `protobuf:"bytes,6,rep,name=vendor_extension,json=vendorExtension" json:"vendor_extension,omitempty"` +} + +func (m *Xml) Reset() { *m = Xml{} } +func (m *Xml) String() string { return proto.CompactTextString(m) } +func (*Xml) ProtoMessage() {} +func (*Xml) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{59} } + +func (m *Xml) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Xml) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *Xml) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Xml) GetAttribute() bool { + if m != nil { + return m.Attribute + } + return false +} + +func (m *Xml) GetWrapped() bool { + if m != nil { + return m.Wrapped + } + return false +} + +func (m *Xml) GetVendorExtension() []*NamedAny { + if m != nil { + return m.VendorExtension + } + return nil +} + +func init() { + proto.RegisterType((*AdditionalPropertiesItem)(nil), "openapi.v2.AdditionalPropertiesItem") + proto.RegisterType((*Any)(nil), "openapi.v2.Any") + proto.RegisterType((*ApiKeySecurity)(nil), "openapi.v2.ApiKeySecurity") + proto.RegisterType((*BasicAuthenticationSecurity)(nil), "openapi.v2.BasicAuthenticationSecurity") + proto.RegisterType((*BodyParameter)(nil), "openapi.v2.BodyParameter") + proto.RegisterType((*Contact)(nil), "openapi.v2.Contact") + proto.RegisterType((*Default)(nil), "openapi.v2.Default") + proto.RegisterType((*Definitions)(nil), "openapi.v2.Definitions") + proto.RegisterType((*Document)(nil), "openapi.v2.Document") + proto.RegisterType((*Examples)(nil), "openapi.v2.Examples") + proto.RegisterType((*ExternalDocs)(nil), "openapi.v2.ExternalDocs") + proto.RegisterType((*FileSchema)(nil), "openapi.v2.FileSchema") + proto.RegisterType((*FormDataParameterSubSchema)(nil), "openapi.v2.FormDataParameterSubSchema") + proto.RegisterType((*Header)(nil), "openapi.v2.Header") + proto.RegisterType((*HeaderParameterSubSchema)(nil), "openapi.v2.HeaderParameterSubSchema") + proto.RegisterType((*Headers)(nil), "openapi.v2.Headers") + proto.RegisterType((*Info)(nil), "openapi.v2.Info") + proto.RegisterType((*ItemsItem)(nil), "openapi.v2.ItemsItem") + proto.RegisterType((*JsonReference)(nil), "openapi.v2.JsonReference") + proto.RegisterType((*License)(nil), "openapi.v2.License") + proto.RegisterType((*NamedAny)(nil), "openapi.v2.NamedAny") + proto.RegisterType((*NamedHeader)(nil), "openapi.v2.NamedHeader") + proto.RegisterType((*NamedParameter)(nil), 
"openapi.v2.NamedParameter") + proto.RegisterType((*NamedPathItem)(nil), "openapi.v2.NamedPathItem") + proto.RegisterType((*NamedResponse)(nil), "openapi.v2.NamedResponse") + proto.RegisterType((*NamedResponseValue)(nil), "openapi.v2.NamedResponseValue") + proto.RegisterType((*NamedSchema)(nil), "openapi.v2.NamedSchema") + proto.RegisterType((*NamedSecurityDefinitionsItem)(nil), "openapi.v2.NamedSecurityDefinitionsItem") + proto.RegisterType((*NamedString)(nil), "openapi.v2.NamedString") + proto.RegisterType((*NamedStringArray)(nil), "openapi.v2.NamedStringArray") + proto.RegisterType((*NonBodyParameter)(nil), "openapi.v2.NonBodyParameter") + proto.RegisterType((*Oauth2AccessCodeSecurity)(nil), "openapi.v2.Oauth2AccessCodeSecurity") + proto.RegisterType((*Oauth2ApplicationSecurity)(nil), "openapi.v2.Oauth2ApplicationSecurity") + proto.RegisterType((*Oauth2ImplicitSecurity)(nil), "openapi.v2.Oauth2ImplicitSecurity") + proto.RegisterType((*Oauth2PasswordSecurity)(nil), "openapi.v2.Oauth2PasswordSecurity") + proto.RegisterType((*Oauth2Scopes)(nil), "openapi.v2.Oauth2Scopes") + proto.RegisterType((*Operation)(nil), "openapi.v2.Operation") + proto.RegisterType((*Parameter)(nil), "openapi.v2.Parameter") + proto.RegisterType((*ParameterDefinitions)(nil), "openapi.v2.ParameterDefinitions") + proto.RegisterType((*ParametersItem)(nil), "openapi.v2.ParametersItem") + proto.RegisterType((*PathItem)(nil), "openapi.v2.PathItem") + proto.RegisterType((*PathParameterSubSchema)(nil), "openapi.v2.PathParameterSubSchema") + proto.RegisterType((*Paths)(nil), "openapi.v2.Paths") + proto.RegisterType((*PrimitivesItems)(nil), "openapi.v2.PrimitivesItems") + proto.RegisterType((*Properties)(nil), "openapi.v2.Properties") + proto.RegisterType((*QueryParameterSubSchema)(nil), "openapi.v2.QueryParameterSubSchema") + proto.RegisterType((*Response)(nil), "openapi.v2.Response") + proto.RegisterType((*ResponseDefinitions)(nil), "openapi.v2.ResponseDefinitions") + proto.RegisterType((*ResponseValue)(nil), "openapi.v2.ResponseValue") + proto.RegisterType((*Responses)(nil), "openapi.v2.Responses") + proto.RegisterType((*Schema)(nil), "openapi.v2.Schema") + proto.RegisterType((*SchemaItem)(nil), "openapi.v2.SchemaItem") + proto.RegisterType((*SecurityDefinitions)(nil), "openapi.v2.SecurityDefinitions") + proto.RegisterType((*SecurityDefinitionsItem)(nil), "openapi.v2.SecurityDefinitionsItem") + proto.RegisterType((*SecurityRequirement)(nil), "openapi.v2.SecurityRequirement") + proto.RegisterType((*StringArray)(nil), "openapi.v2.StringArray") + proto.RegisterType((*Tag)(nil), "openapi.v2.Tag") + proto.RegisterType((*TypeItem)(nil), "openapi.v2.TypeItem") + proto.RegisterType((*VendorExtension)(nil), "openapi.v2.VendorExtension") + proto.RegisterType((*Xml)(nil), "openapi.v2.Xml") +} + +func init() { proto.RegisterFile("OpenAPIv2/OpenAPIv2.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 3129 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x3b, 0x4b, 0x73, 0x1c, 0x57, + 0xd5, 0xf3, 0x7e, 0x1c, 0x69, 0x46, 0xa3, 0x96, 0x2c, 0xb7, 0x24, 0xc7, 0x71, 0xe4, 0x3c, 0x6c, + 0xe7, 0xb3, 0x9c, 0x4f, 0x29, 0x48, 0x05, 0x2a, 0x05, 0xf2, 0xab, 0xc6, 0xc4, 0x44, 0x4a, 0xcb, + 0x0e, 0x09, 0x04, 0xba, 0xae, 0x66, 0xee, 0x48, 0x9d, 0x74, 0xf7, 0x6d, 0x77, 0xf7, 0xc8, 0x1a, + 0x16, 0x2c, 0xa0, 0x8a, 0x35, 0x50, 0x59, 0x53, 0x15, 0x16, 0x14, 0x55, 0x59, 0xb0, 0x62, 0xc5, + 0x1f, 0x60, 0xc7, 0x3f, 0x60, 0x0d, 0x5b, 0xaa, 0x58, 0x51, 0x3c, 0xea, 0xbe, 0xfa, 0x31, 0x7d, + 
0x7b, 0x1e, 0x96, 0x0b, 0x28, 0xd0, 0x6a, 0xe6, 0xde, 0x73, 0xee, 0xb9, 0xa7, 0x4f, 0x9f, 0xd7, + 0x3d, 0xe7, 0x36, 0xac, 0xef, 0x79, 0xd8, 0xdd, 0xdd, 0x7f, 0x70, 0xb2, 0x73, 0x2b, 0xfa, 0xb7, + 0xed, 0xf9, 0x24, 0x24, 0x1a, 0x10, 0x0f, 0xbb, 0xc8, 0xb3, 0xb6, 0x4f, 0x76, 0x36, 0xd6, 0x8f, + 0x08, 0x39, 0xb2, 0xf1, 0x2d, 0x06, 0x39, 0x1c, 0x0e, 0x6e, 0x21, 0x77, 0xc4, 0xd1, 0xb6, 0x1c, + 0xd0, 0x77, 0xfb, 0x7d, 0x2b, 0xb4, 0x88, 0x8b, 0xec, 0x7d, 0x9f, 0x78, 0xd8, 0x0f, 0x2d, 0x1c, + 0x3c, 0x08, 0xb1, 0xa3, 0xfd, 0x1f, 0xd4, 0x82, 0xde, 0x31, 0x76, 0x90, 0x5e, 0xbc, 0x52, 0xbc, + 0xb6, 0xb0, 0xa3, 0x6d, 0xc7, 0x34, 0xb7, 0x0f, 0x18, 0xa4, 0x5b, 0x30, 0x04, 0x8e, 0xb6, 0x01, + 0xf5, 0x43, 0x42, 0x6c, 0x8c, 0x5c, 0xbd, 0x74, 0xa5, 0x78, 0xad, 0xd1, 0x2d, 0x18, 0x72, 0xe2, + 0x76, 0x1d, 0xaa, 0xc4, 0xc5, 0x64, 0xb0, 0x75, 0x0f, 0xca, 0xbb, 0xee, 0x48, 0xbb, 0x01, 0xd5, + 0x13, 0x64, 0x0f, 0xb1, 0x20, 0xbc, 0xba, 0xcd, 0x19, 0xdc, 0x96, 0x0c, 0x6e, 0xef, 0xba, 0x23, + 0x83, 0xa3, 0x68, 0x1a, 0x54, 0x46, 0xc8, 0xb1, 0x19, 0xd1, 0xa6, 0xc1, 0xfe, 0x6f, 0x7d, 0x51, + 0x84, 0xf6, 0xae, 0x67, 0xbd, 0x8b, 0x47, 0x07, 0xb8, 0x37, 0xf4, 0xad, 0x70, 0x44, 0xd1, 0xc2, + 0x91, 0xc7, 0x29, 0x36, 0x0d, 0xf6, 0x9f, 0xce, 0xb9, 0xc8, 0xc1, 0x72, 0x29, 0xfd, 0xaf, 0xb5, + 0xa1, 0x64, 0xb9, 0x7a, 0x99, 0xcd, 0x94, 0x2c, 0x57, 0xbb, 0x02, 0x0b, 0x7d, 0x1c, 0xf4, 0x7c, + 0xcb, 0xa3, 0x32, 0xd0, 0x2b, 0x0c, 0x90, 0x9c, 0xd2, 0xbe, 0x06, 0x9d, 0x13, 0xec, 0xf6, 0x89, + 0x6f, 0xe2, 0xd3, 0x10, 0xbb, 0x01, 0x45, 0xab, 0x5e, 0x29, 0x33, 0xbe, 0x13, 0x02, 0x79, 0x0f, + 0x39, 0xb8, 0x4f, 0xf9, 0x5e, 0xe2, 0xd8, 0xf7, 0x24, 0xf2, 0xd6, 0x67, 0x45, 0xd8, 0xbc, 0x8d, + 0x02, 0xab, 0xb7, 0x3b, 0x0c, 0x8f, 0xb1, 0x1b, 0x5a, 0x3d, 0x44, 0x09, 0x4f, 0x64, 0x7d, 0x8c, + 0xad, 0xd2, 0x6c, 0x6c, 0x95, 0xe7, 0x61, 0xeb, 0x0f, 0x45, 0x68, 0xdd, 0x26, 0xfd, 0xd1, 0x3e, + 0xf2, 0x91, 0x83, 0x43, 0xec, 0x8f, 0x6f, 0x5a, 0xcc, 0x6e, 0x3a, 0x8b, 0x44, 0x37, 0xa0, 0xe1, + 0xe3, 0x27, 0x43, 0xcb, 0xc7, 0x7d, 0x26, 0xce, 0x86, 0x11, 0x8d, 0xb5, 0x1b, 0x91, 0x4a, 0x55, + 0xf3, 0x54, 0x2a, 0x52, 0x28, 0xd5, 0x03, 0xd6, 0xe6, 0x79, 0xc0, 0x1f, 0x17, 0xa1, 0x7e, 0x87, + 0xb8, 0x21, 0xea, 0x85, 0x11, 0xe3, 0xc5, 0x04, 0xe3, 0x1d, 0x28, 0x0f, 0x7d, 0xa9, 0x58, 0xf4, + 0xaf, 0xb6, 0x0a, 0x55, 0xec, 0x20, 0xcb, 0x16, 0x4f, 0xc3, 0x07, 0x4a, 0x46, 0x2a, 0xf3, 0x30, + 0xf2, 0x08, 0xea, 0x77, 0xf1, 0x00, 0x0d, 0xed, 0x50, 0x7b, 0x00, 0x17, 0x50, 0x64, 0x6f, 0xa6, + 0x17, 0x19, 0x9c, 0x5e, 0x9c, 0x40, 0x70, 0x15, 0x29, 0x4c, 0x74, 0xeb, 0x3b, 0xb0, 0x70, 0x17, + 0x0f, 0x2c, 0x97, 0x41, 0x02, 0xed, 0xe1, 0x64, 0xca, 0x17, 0x33, 0x94, 0x85, 0xb8, 0xd5, 0xc4, + 0xff, 0x58, 0x85, 0xc6, 0x5d, 0xd2, 0x1b, 0x3a, 0xd8, 0x0d, 0x35, 0x1d, 0xea, 0xc1, 0x53, 0x74, + 0x74, 0x84, 0x7d, 0x21, 0x3f, 0x39, 0xd4, 0x5e, 0x86, 0x8a, 0xe5, 0x0e, 0x08, 0x93, 0xe1, 0xc2, + 0x4e, 0x27, 0xb9, 0xc7, 0x03, 0x77, 0x40, 0x0c, 0x06, 0xa5, 0xc2, 0x3f, 0x26, 0x41, 0x28, 0xa4, + 0xca, 0xfe, 0x6b, 0x9b, 0xd0, 0x3c, 0x44, 0x01, 0x36, 0x3d, 0x14, 0x1e, 0x0b, 0xab, 0x6b, 0xd0, + 0x89, 0x7d, 0x14, 0x1e, 0xb3, 0x0d, 0x29, 0x77, 0x38, 0x60, 0x96, 0x46, 0x37, 0xe4, 0x43, 0xaa, + 0x5c, 0x3d, 0xe2, 0x06, 0x43, 0x0a, 0xaa, 0x31, 0x50, 0x34, 0xa6, 0x30, 0xcf, 0x27, 0xfd, 0x61, + 0x0f, 0x07, 0x7a, 0x9d, 0xc3, 0xe4, 0x58, 0x7b, 0x0d, 0xaa, 0x74, 0xa7, 0x40, 0x6f, 0x30, 0x4e, + 0x97, 0x93, 0x9c, 0xd2, 0x2d, 0x03, 0x83, 0xc3, 0xb5, 0xb7, 0xa9, 0x0d, 0x44, 0x52, 0xd5, 0x9b, + 0x0c, 0x3d, 0x25, 0xbc, 0x84, 0xd0, 0x8d, 0x24, 0xae, 0xf6, 0x75, 0x00, 0x4f, 0xda, 0x52, 0xa0, + 0x03, 0x5b, 0x79, 0x25, 
0xbd, 0x91, 0x80, 0x26, 0x49, 0x24, 0xd6, 0x68, 0xef, 0x40, 0xd3, 0xc7, + 0x81, 0x47, 0xdc, 0x00, 0x07, 0xfa, 0x02, 0x23, 0xf0, 0x62, 0x92, 0x80, 0x21, 0x80, 0xc9, 0xf5, + 0xf1, 0x0a, 0xed, 0xab, 0xd0, 0x08, 0x84, 0x53, 0xd1, 0x17, 0xd9, 0x5b, 0x4f, 0xad, 0x96, 0x0e, + 0xc7, 0xe0, 0xd6, 0x48, 0x5f, 0xad, 0x11, 0x2d, 0xd0, 0x0c, 0x58, 0x95, 0xff, 0xcd, 0xa4, 0x04, + 0x5a, 0x59, 0x36, 0x24, 0xa1, 0x24, 0x1b, 0x2b, 0x41, 0x76, 0x52, 0xbb, 0x0a, 0x95, 0x10, 0x1d, + 0x05, 0x7a, 0x9b, 0x31, 0xb3, 0x94, 0xa4, 0xf1, 0x08, 0x1d, 0x19, 0x0c, 0xa8, 0xbd, 0x03, 0x2d, + 0x6a, 0x57, 0x3e, 0x55, 0xdb, 0x3e, 0xe9, 0x05, 0xfa, 0x12, 0xdb, 0x51, 0x4f, 0x62, 0xdf, 0x13, + 0x08, 0x77, 0x49, 0x2f, 0x30, 0x16, 0x71, 0x62, 0xa4, 0xb4, 0xce, 0xce, 0x3c, 0xd6, 0xf9, 0x18, + 0x1a, 0xf7, 0x4e, 0x91, 0xe3, 0xd9, 0x38, 0x78, 0x9e, 0xe6, 0xf9, 0xa3, 0x22, 0x2c, 0x26, 0xd9, + 0x9e, 0xc1, 0xbb, 0x66, 0x1d, 0xd2, 0x99, 0x9d, 0xfc, 0x3f, 0x4a, 0x00, 0xf7, 0x2d, 0x1b, 0x73, + 0x63, 0xd7, 0xd6, 0xa0, 0x36, 0x20, 0xbe, 0x83, 0x42, 0xb1, 0xbd, 0x18, 0x51, 0xc7, 0x17, 0x5a, + 0xa1, 0x2d, 0x1d, 0x3b, 0x1f, 0x8c, 0x73, 0x5c, 0xce, 0x72, 0x7c, 0x1d, 0xea, 0x7d, 0xee, 0xd9, + 0x98, 0x0d, 0x8f, 0xbd, 0x63, 0xca, 0x91, 0x84, 0xa7, 0xc2, 0x02, 0x37, 0xea, 0x38, 0x2c, 0xc8, + 0x08, 0x58, 0x4b, 0x44, 0xc0, 0x4d, 0x6a, 0x0b, 0xa8, 0x6f, 0x12, 0xd7, 0x1e, 0xe9, 0x75, 0x19, + 0x47, 0x50, 0x7f, 0xcf, 0xb5, 0x47, 0x59, 0x9d, 0x69, 0xcc, 0xa5, 0x33, 0xd7, 0xa1, 0x8e, 0xf9, + 0x2b, 0x17, 0x06, 0x9e, 0x65, 0x5b, 0xc0, 0x95, 0x6f, 0x00, 0xe6, 0x79, 0x03, 0x5f, 0xd4, 0x60, + 0xe3, 0x3e, 0xf1, 0x9d, 0xbb, 0x28, 0x44, 0x91, 0x03, 0x38, 0x18, 0x1e, 0x1e, 0xc8, 0xb4, 0x29, + 0x16, 0x4b, 0x71, 0x2c, 0x5a, 0xf2, 0xc8, 0x5a, 0xca, 0xcb, 0x55, 0xca, 0xf9, 0xf1, 0xb9, 0x92, + 0x08, 0x73, 0x37, 0x60, 0x19, 0xd9, 0x36, 0x79, 0x6a, 0x62, 0xc7, 0x0b, 0x47, 0x26, 0x4f, 0xbc, + 0xaa, 0x6c, 0xab, 0x25, 0x06, 0xb8, 0x47, 0xe7, 0x3f, 0x90, 0xc9, 0x56, 0xe6, 0x45, 0xc4, 0x3a, + 0x53, 0x4f, 0xe9, 0xcc, 0xff, 0x43, 0xd5, 0x0a, 0xb1, 0x23, 0x65, 0xbf, 0x99, 0xf2, 0x74, 0xbe, + 0xe5, 0x58, 0xa1, 0x75, 0xc2, 0x33, 0xc9, 0xc0, 0xe0, 0x98, 0xda, 0xeb, 0xb0, 0xdc, 0x23, 0xb6, + 0x8d, 0x7b, 0x94, 0x59, 0x53, 0x50, 0x6d, 0x32, 0xaa, 0x9d, 0x18, 0x70, 0x9f, 0xd3, 0x4f, 0xe8, + 0x16, 0x4c, 0xd1, 0x2d, 0x1d, 0xea, 0x0e, 0x3a, 0xb5, 0x9c, 0xa1, 0xc3, 0xbc, 0x66, 0xd1, 0x90, + 0x43, 0xba, 0x23, 0x3e, 0xed, 0xd9, 0xc3, 0xc0, 0x3a, 0xc1, 0xa6, 0xc4, 0x59, 0x64, 0x0f, 0xdf, + 0x89, 0x00, 0xdf, 0x14, 0xc8, 0x94, 0x8c, 0xe5, 0x32, 0x94, 0x96, 0x20, 0xc3, 0x87, 0x63, 0x64, + 0x04, 0x4e, 0x7b, 0x9c, 0x8c, 0x40, 0x7e, 0x01, 0xc0, 0x41, 0xa7, 0xa6, 0x8d, 0xdd, 0xa3, 0xf0, + 0x98, 0x79, 0xb3, 0xb2, 0xd1, 0x74, 0xd0, 0xe9, 0x43, 0x36, 0xc1, 0xc0, 0x96, 0x2b, 0xc1, 0x1d, + 0x01, 0xb6, 0x5c, 0x01, 0xd6, 0xa1, 0xee, 0xa1, 0x90, 0x2a, 0xab, 0xbe, 0xcc, 0x83, 0xad, 0x18, + 0x52, 0x8b, 0xa0, 0x74, 0xb9, 0xd0, 0x35, 0xb6, 0xae, 0xe1, 0xa0, 0x53, 0x26, 0x61, 0x06, 0xb4, + 0x5c, 0x01, 0x5c, 0x11, 0x40, 0xcb, 0xe5, 0xc0, 0x97, 0x60, 0x71, 0xe8, 0x5a, 0x4f, 0x86, 0x58, + 0xc0, 0x57, 0x19, 0xe7, 0x0b, 0x7c, 0x8e, 0xa3, 0x5c, 0x85, 0x0a, 0x76, 0x87, 0x8e, 0x7e, 0x21, + 0xeb, 0xaa, 0xa9, 0xa8, 0x19, 0x50, 0x7b, 0x11, 0x16, 0x9c, 0xa1, 0x1d, 0x5a, 0x9e, 0x8d, 0x4d, + 0x32, 0xd0, 0xd7, 0x98, 0x90, 0x40, 0x4e, 0xed, 0x0d, 0x94, 0xd6, 0x72, 0x71, 0x2e, 0x6b, 0xa9, + 0x42, 0xad, 0x8b, 0x51, 0x1f, 0xfb, 0xca, 0xb4, 0x38, 0xd6, 0xc5, 0x92, 0x5a, 0x17, 0xcb, 0x67, + 0xd3, 0xc5, 0xca, 0x74, 0x5d, 0xac, 0xce, 0xae, 0x8b, 0xb5, 0x19, 0x74, 0xb1, 0x3e, 0x5d, 0x17, + 0x1b, 0x33, 0xe8, 0x62, 0x73, 0x26, 0x5d, 0x84, 
0xc9, 0xba, 0xb8, 0x30, 0x41, 0x17, 0x17, 0x27, + 0xe8, 0x62, 0x6b, 0x92, 0x2e, 0xb6, 0xa7, 0xe8, 0xe2, 0x52, 0xbe, 0x2e, 0x76, 0xe6, 0xd0, 0xc5, + 0xe5, 0x8c, 0x2e, 0x8e, 0x79, 0x4b, 0x6d, 0xb6, 0x23, 0xd4, 0xca, 0x3c, 0xda, 0xfa, 0xb7, 0x2a, + 0xe8, 0x5c, 0x5b, 0xff, 0x2d, 0x9e, 0x5d, 0x5a, 0x48, 0x55, 0x69, 0x21, 0x35, 0xb5, 0x85, 0xd4, + 0xcf, 0x66, 0x21, 0x8d, 0xe9, 0x16, 0xd2, 0x9c, 0xdd, 0x42, 0x60, 0x06, 0x0b, 0x59, 0x98, 0x6e, + 0x21, 0x8b, 0x33, 0x58, 0x48, 0x6b, 0x26, 0x0b, 0x69, 0x4f, 0xb6, 0x90, 0xa5, 0x09, 0x16, 0xd2, + 0x99, 0x60, 0x21, 0xcb, 0x93, 0x2c, 0x44, 0x9b, 0x62, 0x21, 0x2b, 0xf9, 0x16, 0xb2, 0x3a, 0x87, + 0x85, 0x5c, 0x98, 0xc9, 0x5b, 0xaf, 0xcd, 0xa3, 0xff, 0xdf, 0x82, 0x3a, 0x57, 0xff, 0x67, 0x38, + 0x7e, 0xf2, 0x85, 0x39, 0xc9, 0xf3, 0xe7, 0x25, 0xa8, 0xd0, 0x03, 0x64, 0x9c, 0x98, 0x16, 0x93, + 0x89, 0xa9, 0x0e, 0xf5, 0x13, 0xec, 0x07, 0x71, 0x65, 0x44, 0x0e, 0x67, 0x30, 0xa4, 0x6b, 0xd0, + 0x09, 0xb1, 0xef, 0x04, 0x26, 0x19, 0x98, 0x01, 0xf6, 0x4f, 0xac, 0x9e, 0x34, 0xaa, 0x36, 0x9b, + 0xdf, 0x1b, 0x1c, 0xf0, 0x59, 0xed, 0x26, 0xd4, 0x7b, 0xbc, 0x7c, 0x20, 0x9c, 0xfe, 0x4a, 0xf2, + 0x21, 0x44, 0x65, 0xc1, 0x90, 0x38, 0x14, 0xdd, 0xb6, 0x7a, 0xd8, 0x0d, 0x78, 0xfa, 0x34, 0x86, + 0xfe, 0x90, 0x83, 0x0c, 0x89, 0xa3, 0x14, 0x7e, 0x7d, 0x1e, 0xe1, 0xbf, 0x05, 0x4d, 0xa6, 0x0c, + 0xac, 0x56, 0x77, 0x23, 0x51, 0xab, 0x2b, 0x4f, 0x2e, 0xac, 0x6c, 0xdd, 0x85, 0xd6, 0x37, 0x02, + 0xe2, 0x1a, 0x78, 0x80, 0x7d, 0xec, 0xf6, 0xb0, 0xb6, 0x0c, 0x15, 0xd3, 0xc7, 0x03, 0x21, 0xe3, + 0xb2, 0x81, 0x07, 0xd3, 0xeb, 0x4f, 0x5b, 0x1e, 0xd4, 0xc5, 0x33, 0xcd, 0x58, 0x5c, 0x39, 0xf3, + 0x59, 0xe6, 0x1e, 0x34, 0x24, 0x50, 0xb9, 0xe5, 0x2b, 0xb2, 0xaa, 0x58, 0x52, 0x3b, 0x20, 0x0e, + 0xdd, 0x7a, 0x17, 0x16, 0x12, 0x0a, 0xa8, 0xa4, 0x74, 0x2d, 0x4d, 0x29, 0x25, 0x4c, 0xa1, 0xb7, + 0x82, 0xd8, 0xfb, 0xd0, 0x66, 0xc4, 0xe2, 0x22, 0x9a, 0x8a, 0xde, 0xeb, 0x69, 0x7a, 0x17, 0x94, + 0x45, 0x01, 0x49, 0x72, 0x0f, 0x5a, 0x82, 0x64, 0x78, 0xcc, 0xde, 0xad, 0x8a, 0xe2, 0x8d, 0x34, + 0xc5, 0xd5, 0xf1, 0x7a, 0x06, 0x5d, 0x38, 0x4e, 0x50, 0x56, 0x0f, 0xe6, 0x26, 0x28, 0x17, 0x4a, + 0x82, 0x1f, 0x81, 0x96, 0x22, 0x18, 0x9d, 0x1d, 0x32, 0x54, 0x6f, 0xa5, 0xa9, 0xae, 0xab, 0xa8, + 0xb2, 0xd5, 0xe3, 0x2f, 0x47, 0xc4, 0xd0, 0x79, 0x5f, 0x8e, 0xd0, 0x74, 0x41, 0xcc, 0x81, 0x4b, + 0x9c, 0x58, 0xb6, 0x34, 0x91, 0x2b, 0xd8, 0xb7, 0xd3, 0xd4, 0xaf, 0x4e, 0xa9, 0x7b, 0x24, 0xe5, + 0xfc, 0x96, 0xe4, 0x3d, 0xf4, 0x2d, 0xf7, 0x48, 0x49, 0x7d, 0x35, 0x49, 0xbd, 0x29, 0x17, 0x3e, + 0x86, 0x4e, 0x62, 0xe1, 0xae, 0xef, 0x23, 0xb5, 0x82, 0xdf, 0x4c, 0xf3, 0x96, 0xf2, 0xa9, 0x89, + 0xb5, 0x92, 0xec, 0x6f, 0xca, 0xd0, 0x79, 0x8f, 0xb8, 0xe9, 0x1a, 0x2f, 0x86, 0xcd, 0x63, 0xa6, + 0xc1, 0x66, 0x54, 0x77, 0x32, 0x83, 0xe1, 0xa1, 0x99, 0xaa, 0xf4, 0xbf, 0x9c, 0x55, 0xf8, 0x6c, + 0x82, 0xd3, 0x2d, 0x18, 0xfa, 0x71, 0x5e, 0xf2, 0x63, 0xc3, 0x65, 0x9a, 0x30, 0x98, 0x7d, 0x14, + 0x22, 0xf5, 0x4e, 0xfc, 0x19, 0x5e, 0x4d, 0xee, 0x94, 0x7f, 0x4c, 0xee, 0x16, 0x8c, 0x8d, 0x41, + 0xfe, 0x21, 0xfa, 0x10, 0x36, 0x9e, 0x0c, 0xb1, 0x3f, 0x52, 0xef, 0x54, 0xce, 0xbe, 0xc9, 0xf7, + 0x29, 0xb6, 0x72, 0x9b, 0x8b, 0x4f, 0xd4, 0x20, 0xcd, 0x84, 0x75, 0x0f, 0x85, 0xc7, 0xea, 0x2d, + 0x78, 0xf1, 0x63, 0x6b, 0xdc, 0x0a, 0x95, 0x3b, 0xac, 0x79, 0x4a, 0x48, 0xdc, 0x24, 0xf9, 0xbc, + 0x04, 0xfa, 0x1e, 0x1a, 0x86, 0xc7, 0x3b, 0xbb, 0xbd, 0x1e, 0x0e, 0x82, 0x3b, 0xa4, 0x8f, 0xa7, + 0xf5, 0x39, 0x06, 0x36, 0x79, 0x2a, 0xab, 0xf2, 0xf4, 0xbf, 0xf6, 0x06, 0x0d, 0x08, 0xc4, 0xc3, + 0xf2, 0x48, 0x94, 0x2a, 0x8d, 0x70, 0xea, 0x07, 0x0c, 0x6e, 0x08, 0x3c, 
0x9a, 0x35, 0xd1, 0x69, + 0xe2, 0x5b, 0xdf, 0x67, 0xfd, 0x09, 0x93, 0xfa, 0x6f, 0x71, 0x20, 0x4a, 0x01, 0x1e, 0xfb, 0x36, + 0x4d, 0x60, 0x42, 0xf2, 0x29, 0xe6, 0x48, 0x3c, 0xff, 0x6c, 0xb0, 0x09, 0x0a, 0x1c, 0x0b, 0x1e, + 0xb5, 0xd9, 0x32, 0xef, 0xb9, 0x82, 0xdf, 0x5f, 0x8a, 0xb0, 0x2e, 0x64, 0xe4, 0x79, 0xf6, 0x2c, + 0x1d, 0x95, 0xe7, 0x23, 0xa4, 0xd4, 0x73, 0x57, 0x26, 0x3f, 0x77, 0x75, 0xb6, 0xe7, 0x9e, 0xab, + 0xa7, 0xf1, 0xc3, 0x12, 0xac, 0x71, 0xc6, 0x1e, 0x38, 0xf4, 0xb9, 0xad, 0xf0, 0x3f, 0x4d, 0x33, + 0xfe, 0x05, 0x42, 0xf8, 0x73, 0x51, 0x0a, 0x61, 0x1f, 0x05, 0xc1, 0x53, 0xe2, 0xf7, 0xff, 0x07, + 0xde, 0xfc, 0xc7, 0xb0, 0x98, 0xe4, 0xeb, 0x19, 0xfa, 0x3d, 0x2c, 0x42, 0xe4, 0x24, 0xdc, 0x3f, + 0xaf, 0x40, 0x73, 0xcf, 0xc3, 0x3e, 0x92, 0x87, 0x4d, 0x56, 0xb7, 0x2f, 0xb2, 0x3a, 0x2d, 0x2f, + 0xd3, 0xeb, 0x50, 0x0f, 0x86, 0x8e, 0x83, 0xfc, 0x91, 0xcc, 0xb9, 0xc5, 0x70, 0x86, 0x9c, 0x3b, + 0x53, 0xae, 0xad, 0xcc, 0x55, 0xae, 0x7d, 0x09, 0x16, 0x89, 0xe4, 0xcd, 0xb4, 0xfa, 0x52, 0xbc, + 0xd1, 0xdc, 0x83, 0x7e, 0xaa, 0xf7, 0x53, 0x1b, 0xeb, 0xfd, 0x24, 0x7b, 0x46, 0xf5, 0xb1, 0x9e, + 0xd1, 0x57, 0x52, 0x3d, 0x9b, 0x06, 0x13, 0xdd, 0x86, 0x32, 0x3d, 0xe3, 0xa1, 0x3e, 0xd9, 0xad, + 0x79, 0x33, 0xd9, 0xad, 0x69, 0x66, 0x33, 0x3b, 0x99, 0xe0, 0xa4, 0x7a, 0x34, 0x89, 0xd6, 0x16, + 0xa4, 0x5b, 0x5b, 0x97, 0x01, 0xfa, 0xd8, 0xf3, 0x71, 0x0f, 0x85, 0xb8, 0x2f, 0x4e, 0xbd, 0x89, + 0x99, 0xb3, 0x75, 0x77, 0x54, 0xea, 0xd7, 0x9a, 0x47, 0xfd, 0x7e, 0x59, 0x84, 0x66, 0x9c, 0x45, + 0xdc, 0x86, 0xf6, 0x21, 0xe9, 0x27, 0xe2, 0xad, 0x48, 0x1c, 0x52, 0x09, 0x5e, 0x2a, 0xf1, 0xe8, + 0x16, 0x8c, 0xd6, 0x61, 0x2a, 0x13, 0x79, 0x08, 0x9a, 0x4b, 0x5c, 0x73, 0x8c, 0x0e, 0x4f, 0x0b, + 0x2e, 0xa5, 0x98, 0x1a, 0xcb, 0x61, 0xba, 0x05, 0xa3, 0xe3, 0x8e, 0xcd, 0xc5, 0xd1, 0xf3, 0x08, + 0x56, 0x55, 0x7d, 0x36, 0x6d, 0x6f, 0xb2, 0xbd, 0x6c, 0x64, 0xc4, 0x10, 0x27, 0xe6, 0x6a, 0x93, + 0xf9, 0xac, 0x08, 0xed, 0xb4, 0x76, 0x68, 0x5f, 0x82, 0xe6, 0xb8, 0x44, 0xd4, 0xb9, 0x7e, 0xb7, + 0x60, 0xc4, 0x98, 0x54, 0x9a, 0x9f, 0x04, 0xc4, 0xa5, 0x67, 0x30, 0x7e, 0x22, 0x53, 0xa5, 0xcb, + 0xa9, 0x23, 0x1b, 0x95, 0xe6, 0x27, 0xc9, 0x89, 0xf8, 0xf9, 0x7f, 0x5f, 0x86, 0x46, 0x74, 0x74, + 0x50, 0x9c, 0xec, 0x5e, 0x83, 0xf2, 0x11, 0x0e, 0x55, 0x27, 0x91, 0xc8, 0xfe, 0x0d, 0x8a, 0x41, + 0x11, 0xbd, 0x61, 0x28, 0xfc, 0x63, 0x1e, 0xa2, 0x37, 0x0c, 0xb5, 0xeb, 0x50, 0xf1, 0x48, 0x20, + 0x3b, 0x40, 0x39, 0x98, 0x0c, 0x45, 0xbb, 0x09, 0xb5, 0x3e, 0xb6, 0x71, 0x88, 0xc5, 0x89, 0x3a, + 0x07, 0x59, 0x20, 0x69, 0xb7, 0xa0, 0x4e, 0x3c, 0xde, 0x86, 0xac, 0x4d, 0xc2, 0x97, 0x58, 0x94, + 0x15, 0x9a, 0x92, 0x8a, 0x22, 0x57, 0x1e, 0x2b, 0x14, 0x85, 0x9e, 0xc9, 0x3c, 0x14, 0xf6, 0x8e, + 0x45, 0xfb, 0x22, 0x07, 0x97, 0xe3, 0x8c, 0xb9, 0x89, 0xe6, 0x5c, 0x6e, 0xe2, 0xcc, 0x1d, 0xa4, + 0xbf, 0x56, 0x61, 0x4d, 0x9d, 0x4d, 0x9e, 0xd7, 0x18, 0xcf, 0x6b, 0x8c, 0xff, 0xed, 0x35, 0xc6, + 0xa7, 0x50, 0x65, 0x17, 0x34, 0x94, 0x94, 0x8a, 0x73, 0x50, 0xd2, 0x6e, 0x42, 0x85, 0xdd, 0x36, + 0x29, 0xb1, 0x45, 0xeb, 0x0a, 0x87, 0x2f, 0xea, 0x26, 0x0c, 0x6d, 0xeb, 0x67, 0x55, 0x58, 0x1a, + 0xd3, 0xda, 0xf3, 0x9e, 0xd4, 0x79, 0x4f, 0xea, 0x4c, 0x3d, 0x29, 0x95, 0x0e, 0x6b, 0xf3, 0x58, + 0xc3, 0xb7, 0x01, 0xe2, 0x14, 0xe4, 0x39, 0xdf, 0xf9, 0xfa, 0x55, 0x0d, 0x2e, 0xe6, 0x14, 0x46, + 0xce, 0xaf, 0x29, 0x9c, 0x5f, 0x53, 0x38, 0xbf, 0xa6, 0x10, 0x9b, 0xe1, 0xdf, 0x8b, 0xd0, 0x88, + 0xca, 0xe9, 0xd3, 0x2f, 0x76, 0x6d, 0x47, 0xdd, 0x19, 0x9e, 0x76, 0xaf, 0x65, 0x6b, 0xd6, 0x2c, + 0xf0, 0xc8, 0xab, 0xaf, 0x37, 0xa1, 0xce, 0x2b, 0xab, 0x32, 0x78, 0xac, 0x64, 0x0b, 0xb2, 0x81, + 
0x21, 0x71, 0xb4, 0x37, 0xa0, 0x21, 0xae, 0x2b, 0xc9, 0x93, 0xf5, 0x6a, 0xfa, 0x64, 0xcd, 0x61, + 0x46, 0x84, 0x75, 0xf6, 0x3b, 0xcd, 0x18, 0x56, 0x14, 0x97, 0x11, 0xb5, 0xf7, 0x26, 0x3b, 0xa4, + 0x6c, 0xcc, 0x8d, 0x5a, 0x0b, 0x6a, 0x97, 0xf4, 0x93, 0x22, 0xb4, 0xd2, 0x5d, 0x86, 0x1d, 0xea, + 0x88, 0xf8, 0x44, 0x74, 0x7b, 0x5c, 0x71, 0xe6, 0xee, 0x16, 0x8c, 0x08, 0xef, 0xf9, 0x9e, 0xaf, + 0x7e, 0x5a, 0x84, 0x66, 0x74, 0xb2, 0xd7, 0xee, 0x40, 0x4b, 0x6e, 0x63, 0xf6, 0x48, 0x1f, 0x8b, + 0x07, 0xbd, 0x9c, 0xfb, 0xa0, 0xbc, 0xdb, 0xb1, 0x28, 0x17, 0xdd, 0x21, 0x7d, 0x75, 0x2b, 0xb0, + 0x34, 0xcf, 0xdb, 0xf8, 0x75, 0x13, 0x6a, 0xc2, 0x51, 0x2b, 0x4e, 0x7c, 0x79, 0x09, 0x4a, 0xd4, + 0x5b, 0x2d, 0x4f, 0xb8, 0xf4, 0x57, 0x99, 0x78, 0xe9, 0x6f, 0x5a, 0xe2, 0x31, 0x66, 0x89, 0xb5, + 0x8c, 0x25, 0x26, 0x5c, 0x62, 0x7d, 0x06, 0x97, 0xd8, 0x98, 0xee, 0x12, 0x9b, 0x33, 0xb8, 0x44, + 0x98, 0xc9, 0x25, 0x2e, 0x4c, 0x76, 0x89, 0x8b, 0x13, 0x5c, 0x62, 0x6b, 0x82, 0x4b, 0x6c, 0x4f, + 0x72, 0x89, 0x4b, 0x53, 0x5c, 0x62, 0x27, 0xeb, 0x12, 0x5f, 0x81, 0x36, 0x25, 0x9e, 0x30, 0x36, + 0x7e, 0x12, 0x68, 0x39, 0xe8, 0x34, 0x91, 0x2b, 0x50, 0x34, 0xcb, 0x4d, 0xa2, 0x69, 0x02, 0xcd, + 0x72, 0x13, 0x68, 0xc9, 0x40, 0xbf, 0x32, 0x76, 0x4d, 0x73, 0xa6, 0x13, 0xc1, 0x47, 0x79, 0x2e, + 0xe0, 0x42, 0xb6, 0xb5, 0x94, 0xf7, 0xe9, 0x89, 0xda, 0x1b, 0x68, 0xd7, 0x44, 0xd8, 0x5f, 0xcb, + 0xda, 0xfd, 0xa3, 0x91, 0x87, 0x79, 0xee, 0xce, 0x92, 0x81, 0xd7, 0x65, 0xd0, 0xbf, 0x98, 0x3d, + 0xdc, 0x47, 0x4d, 0x73, 0x19, 0xee, 0xaf, 0x43, 0x0d, 0xd9, 0x36, 0xd5, 0x4f, 0x3d, 0xb7, 0x77, + 0x5e, 0x45, 0xb6, 0xbd, 0x37, 0xd0, 0xbe, 0x0c, 0x90, 0x78, 0xa2, 0xf5, 0xac, 0x33, 0x8f, 0xb9, + 0x35, 0x12, 0x98, 0xda, 0xcb, 0xd0, 0xea, 0x5b, 0xd4, 0x82, 0x1c, 0xcb, 0x45, 0x21, 0xf1, 0xf5, + 0x0d, 0xa6, 0x20, 0xe9, 0xc9, 0xf4, 0x95, 0xd7, 0xcd, 0xb1, 0x2b, 0xaf, 0x2f, 0x41, 0xf9, 0xd4, + 0xb1, 0xf5, 0x4b, 0x59, 0x8b, 0xfb, 0xd0, 0xb1, 0x0d, 0x0a, 0xcb, 0x96, 0x59, 0x5f, 0x78, 0xd6, + 0x5b, 0xb1, 0x97, 0x9f, 0xe1, 0x56, 0xec, 0x8b, 0xf3, 0x78, 0xac, 0x1f, 0x00, 0xc4, 0x71, 0x6f, + 0xce, 0x2f, 0x8d, 0xde, 0x86, 0x85, 0x81, 0x65, 0x63, 0x33, 0x3f, 0xa4, 0xc6, 0x37, 0x9e, 0xbb, + 0x05, 0x03, 0x06, 0xd1, 0x28, 0xf6, 0xe2, 0x21, 0xac, 0x28, 0xba, 0xb9, 0xda, 0x77, 0x27, 0xc7, + 0xaf, 0x6b, 0xd9, 0x84, 0x3a, 0xa7, 0x25, 0xac, 0x0e, 0x67, 0x7f, 0xaa, 0xc0, 0xc5, 0xbc, 0x66, + 0xb4, 0x03, 0x2f, 0x1c, 0xa2, 0xc0, 0xea, 0x99, 0x28, 0xf5, 0x95, 0x90, 0x19, 0xd5, 0x7c, 0xb9, + 0x68, 0x5e, 0x4b, 0x55, 0x58, 0xf3, 0xbf, 0x2a, 0xea, 0x16, 0x8c, 0xcd, 0xc3, 0x09, 0x1f, 0x1d, + 0xdd, 0x87, 0x0e, 0xf2, 0x2c, 0xf3, 0x53, 0x3c, 0x8a, 0x77, 0xe0, 0x92, 0x4c, 0xd5, 0xb5, 0xd2, + 0x5f, 0x59, 0x75, 0x0b, 0x46, 0x1b, 0xa5, 0xbf, 0xbb, 0xfa, 0x1e, 0xe8, 0x84, 0xb5, 0x25, 0x4c, + 0x4b, 0x34, 0xa4, 0x62, 0x7a, 0xe5, 0x6c, 0x57, 0x54, 0xdd, 0xbb, 0xea, 0x16, 0x8c, 0x35, 0xa2, + 0xee, 0x6a, 0xc5, 0xf4, 0x3d, 0xd1, 0xeb, 0x89, 0xe9, 0x57, 0xf2, 0xe8, 0x8f, 0xb7, 0x85, 0x62, + 0xfa, 0x99, 0x86, 0xd1, 0x11, 0x6c, 0x0a, 0xfa, 0x28, 0x6e, 0x24, 0xc6, 0x5b, 0xf0, 0x00, 0xf7, + 0x4a, 0x76, 0x0b, 0x45, 0xdb, 0xb1, 0x5b, 0x30, 0xd6, 0x49, 0x6e, 0x4f, 0x12, 0xc7, 0x1b, 0xb1, + 0xae, 0x2e, 0x4b, 0x17, 0xe2, 0x8d, 0x6a, 0x59, 0xef, 0x98, 0xd7, 0x03, 0xee, 0x16, 0x0c, 0x21, + 0x93, 0x2c, 0x2c, 0xd6, 0xf0, 0xe3, 0x58, 0xc3, 0x13, 0x2d, 0x01, 0xed, 0xfd, 0xc9, 0x1a, 0x7e, + 0x29, 0xa7, 0x6d, 0xc4, 0x2f, 0x16, 0xa8, 0xb5, 0xfa, 0x2a, 0x2c, 0x24, 0x6f, 0x2e, 0xac, 0xc6, + 0x1f, 0xf7, 0x95, 0xe3, 0x3b, 0x0e, 0xbf, 0x2d, 0x42, 0xf9, 0x11, 0x52, 0xdf, 0x8a, 0x98, 0xfe, + 0xb1, 0x5b, 0xc6, 0xb3, 
0x95, 0xcf, 0xfc, 0x8d, 0xc8, 0x5c, 0x5f, 0x70, 0x5d, 0x81, 0x86, 0x8c, + 0x30, 0x39, 0xcf, 0xf7, 0x31, 0x2c, 0x7d, 0x30, 0x56, 0x6f, 0x7a, 0x8e, 0x1f, 0x93, 0xfc, 0xae, + 0x08, 0xe5, 0x0f, 0x1d, 0x5b, 0x29, 0xbd, 0x4b, 0xd0, 0xa4, 0xbf, 0x81, 0x87, 0x7a, 0xf2, 0x5e, + 0x49, 0x3c, 0x41, 0x93, 0x3f, 0xcf, 0xc7, 0x03, 0xeb, 0x54, 0x64, 0x79, 0x62, 0x44, 0x57, 0xa1, + 0x30, 0xf4, 0xad, 0xc3, 0x61, 0x88, 0xc5, 0x67, 0x7a, 0xf1, 0x04, 0x4d, 0x65, 0x9e, 0xfa, 0xc8, + 0xf3, 0x70, 0x5f, 0x1c, 0xc1, 0xe5, 0xf0, 0xcc, 0x7d, 0xcc, 0xdb, 0xaf, 0x42, 0x9b, 0xf8, 0x47, + 0x12, 0xd7, 0x3c, 0xd9, 0xb9, 0xbd, 0x28, 0xbe, 0x5d, 0xdd, 0xf7, 0x49, 0x48, 0xf6, 0x8b, 0xbf, + 0x28, 0x95, 0xf7, 0x76, 0x0f, 0x0e, 0x6b, 0xec, 0x63, 0xd0, 0x37, 0xff, 0x19, 0x00, 0x00, 0xff, + 0xff, 0xd4, 0x0a, 0xef, 0xca, 0xe4, 0x3a, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto new file mode 100644 index 000000000000..557c88072cd8 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/OpenAPIv2.proto @@ -0,0 +1,663 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// THIS FILE IS AUTOMATICALLY GENERATED. + +syntax = "proto3"; + +package openapi.v2; + +import "google/protobuf/any.proto"; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIProto"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.openapi_v2"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +option objc_class_prefix = "OAS"; + +message AdditionalPropertiesItem { + oneof oneof { + Schema schema = 1; + bool boolean = 2; + } +} + +message Any { + google.protobuf.Any value = 1; + string yaml = 2; +} + +message ApiKeySecurity { + string type = 1; + string name = 2; + string in = 3; + string description = 4; + repeated NamedAny vendor_extension = 5; +} + +message BasicAuthenticationSecurity { + string type = 1; + string description = 2; + repeated NamedAny vendor_extension = 3; +} + +message BodyParameter { + // A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed. + string description = 1; + // The name of the parameter. + string name = 2; + // Determines the location of the parameter. + string in = 3; + // Determines whether or not this parameter is required or optional. + bool required = 4; + Schema schema = 5; + repeated NamedAny vendor_extension = 6; +} + +// Contact information for the owners of the API. +message Contact { + // The identifying name of the contact person/organization. + string name = 1; + // The URL pointing to the contact information. + string url = 2; + // The email address of the contact person/organization. + string email = 3; + repeated NamedAny vendor_extension = 4; +} + +message Default { + repeated NamedAny additional_properties = 1; +} + +// One or more JSON objects describing the schemas being consumed and produced by the API. +message Definitions { + repeated NamedSchema additional_properties = 1; +} + +message Document { + // The Swagger version of this document. + string swagger = 1; + Info info = 2; + // The host (name or ip) of the API. Example: 'swagger.io' + string host = 3; + // The base path to the API. Example: '/api'. + string base_path = 4; + // The transfer protocol of the API. + repeated string schemes = 5; + // A list of MIME types accepted by the API. + repeated string consumes = 6; + // A list of MIME types the API can produce. + repeated string produces = 7; + Paths paths = 8; + Definitions definitions = 9; + ParameterDefinitions parameters = 10; + ResponseDefinitions responses = 11; + repeated SecurityRequirement security = 12; + SecurityDefinitions security_definitions = 13; + repeated Tag tags = 14; + ExternalDocs external_docs = 15; + repeated NamedAny vendor_extension = 16; +} + +message Examples { + repeated NamedAny additional_properties = 1; +} + +// information about external documentation +message ExternalDocs { + string description = 1; + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// A deterministic version of a JSON Schema object. +message FileSchema { + string format = 1; + string title = 2; + string description = 3; + Any default = 4; + repeated string required = 5; + string type = 6; + bool read_only = 7; + ExternalDocs external_docs = 8; + Any example = 9; + repeated NamedAny vendor_extension = 10; +} + +message FormDataParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Header { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + string description = 18; + repeated NamedAny vendor_extension = 19; +} + +message HeaderParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +message Headers { + repeated NamedHeader additional_properties = 1; +} + +// General information about the API. +message Info { + // A unique and precise title of the API. + string title = 1; + // A semantic version number of the API. + string version = 2; + // A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed. + string description = 3; + // The terms of service for the API. + string terms_of_service = 4; + Contact contact = 5; + License license = 6; + repeated NamedAny vendor_extension = 7; +} + +message ItemsItem { + repeated Schema schema = 1; +} + +message JsonReference { + string _ref = 1; + string description = 2; +} + +message License { + // The name of the license type. It's encouraged to use an OSI compatible license. + string name = 1; + // The URL pointing to the license. + string url = 2; + repeated NamedAny vendor_extension = 3; +} + +// Automatically-generated message used to represent maps of Any as ordered (name,value) pairs. +message NamedAny { + // Map key + string name = 1; + // Mapped value + Any value = 2; +} + +// Automatically-generated message used to represent maps of Header as ordered (name,value) pairs. +message NamedHeader { + // Map key + string name = 1; + // Mapped value + Header value = 2; +} + +// Automatically-generated message used to represent maps of Parameter as ordered (name,value) pairs. +message NamedParameter { + // Map key + string name = 1; + // Mapped value + Parameter value = 2; +} + +// Automatically-generated message used to represent maps of PathItem as ordered (name,value) pairs. 
+message NamedPathItem { + // Map key + string name = 1; + // Mapped value + PathItem value = 2; +} + +// Automatically-generated message used to represent maps of Response as ordered (name,value) pairs. +message NamedResponse { + // Map key + string name = 1; + // Mapped value + Response value = 2; +} + +// Automatically-generated message used to represent maps of ResponseValue as ordered (name,value) pairs. +message NamedResponseValue { + // Map key + string name = 1; + // Mapped value + ResponseValue value = 2; +} + +// Automatically-generated message used to represent maps of Schema as ordered (name,value) pairs. +message NamedSchema { + // Map key + string name = 1; + // Mapped value + Schema value = 2; +} + +// Automatically-generated message used to represent maps of SecurityDefinitionsItem as ordered (name,value) pairs. +message NamedSecurityDefinitionsItem { + // Map key + string name = 1; + // Mapped value + SecurityDefinitionsItem value = 2; +} + +// Automatically-generated message used to represent maps of string as ordered (name,value) pairs. +message NamedString { + // Map key + string name = 1; + // Mapped value + string value = 2; +} + +// Automatically-generated message used to represent maps of StringArray as ordered (name,value) pairs. +message NamedStringArray { + // Map key + string name = 1; + // Mapped value + StringArray value = 2; +} + +message NonBodyParameter { + oneof oneof { + HeaderParameterSubSchema header_parameter_sub_schema = 1; + FormDataParameterSubSchema form_data_parameter_sub_schema = 2; + QueryParameterSubSchema query_parameter_sub_schema = 3; + PathParameterSubSchema path_parameter_sub_schema = 4; + } +} + +message Oauth2AccessCodeSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string token_url = 5; + string description = 6; + repeated NamedAny vendor_extension = 7; +} + +message Oauth2ApplicationSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2ImplicitSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string authorization_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2PasswordSecurity { + string type = 1; + string flow = 2; + Oauth2Scopes scopes = 3; + string token_url = 4; + string description = 5; + repeated NamedAny vendor_extension = 6; +} + +message Oauth2Scopes { + repeated NamedString additional_properties = 1; +} + +message Operation { + repeated string tags = 1; + // A brief summary of the operation. + string summary = 2; + // A longer description of the operation, GitHub Flavored Markdown is allowed. + string description = 3; + ExternalDocs external_docs = 4; + // A unique identifier of the operation. + string operation_id = 5; + // A list of MIME types the API can produce. + repeated string produces = 6; + // A list of MIME types the API can consume. + repeated string consumes = 7; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 8; + Responses responses = 9; + // The transfer protocol of the API. 
+ repeated string schemes = 10; + bool deprecated = 11; + repeated SecurityRequirement security = 12; + repeated NamedAny vendor_extension = 13; +} + +message Parameter { + oneof oneof { + BodyParameter body_parameter = 1; + NonBodyParameter non_body_parameter = 2; + } +} + +// One or more JSON representations for parameters +message ParameterDefinitions { + repeated NamedParameter additional_properties = 1; +} + +message ParametersItem { + oneof oneof { + Parameter parameter = 1; + JsonReference json_reference = 2; + } +} + +message PathItem { + string _ref = 1; + Operation get = 2; + Operation put = 3; + Operation post = 4; + Operation delete = 5; + Operation options = 6; + Operation head = 7; + Operation patch = 8; + // The parameters needed to send a valid API call. + repeated ParametersItem parameters = 9; + repeated NamedAny vendor_extension = 10; +} + +message PathParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + string type = 5; + string format = 6; + PrimitivesItems items = 7; + string collection_format = 8; + Any default = 9; + double maximum = 10; + bool exclusive_maximum = 11; + double minimum = 12; + bool exclusive_minimum = 13; + int64 max_length = 14; + int64 min_length = 15; + string pattern = 16; + int64 max_items = 17; + int64 min_items = 18; + bool unique_items = 19; + repeated Any enum = 20; + double multiple_of = 21; + repeated NamedAny vendor_extension = 22; +} + +// Relative paths to the individual endpoints. They must be relative to the 'basePath'. +message Paths { + repeated NamedAny vendor_extension = 1; + repeated NamedPathItem path = 2; +} + +message PrimitivesItems { + string type = 1; + string format = 2; + PrimitivesItems items = 3; + string collection_format = 4; + Any default = 5; + double maximum = 6; + bool exclusive_maximum = 7; + double minimum = 8; + bool exclusive_minimum = 9; + int64 max_length = 10; + int64 min_length = 11; + string pattern = 12; + int64 max_items = 13; + int64 min_items = 14; + bool unique_items = 15; + repeated Any enum = 16; + double multiple_of = 17; + repeated NamedAny vendor_extension = 18; +} + +message Properties { + repeated NamedSchema additional_properties = 1; +} + +message QueryParameterSubSchema { + // Determines whether or not this parameter is required or optional. + bool required = 1; + // Determines the location of the parameter. + string in = 2; + // A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed. + string description = 3; + // The name of the parameter. + string name = 4; + // allows sending a parameter by name only or with an empty value. 
+ bool allow_empty_value = 5; + string type = 6; + string format = 7; + PrimitivesItems items = 8; + string collection_format = 9; + Any default = 10; + double maximum = 11; + bool exclusive_maximum = 12; + double minimum = 13; + bool exclusive_minimum = 14; + int64 max_length = 15; + int64 min_length = 16; + string pattern = 17; + int64 max_items = 18; + int64 min_items = 19; + bool unique_items = 20; + repeated Any enum = 21; + double multiple_of = 22; + repeated NamedAny vendor_extension = 23; +} + +message Response { + string description = 1; + SchemaItem schema = 2; + Headers headers = 3; + Examples examples = 4; + repeated NamedAny vendor_extension = 5; +} + +// One or more JSON representations for parameters +message ResponseDefinitions { + repeated NamedResponse additional_properties = 1; +} + +message ResponseValue { + oneof oneof { + Response response = 1; + JsonReference json_reference = 2; + } +} + +// Response objects names can either be any valid HTTP status code or 'default'. +message Responses { + repeated NamedResponseValue response_code = 1; + repeated NamedAny vendor_extension = 2; +} + +// A deterministic version of a JSON Schema object. +message Schema { + string _ref = 1; + string format = 2; + string title = 3; + string description = 4; + Any default = 5; + double multiple_of = 6; + double maximum = 7; + bool exclusive_maximum = 8; + double minimum = 9; + bool exclusive_minimum = 10; + int64 max_length = 11; + int64 min_length = 12; + string pattern = 13; + int64 max_items = 14; + int64 min_items = 15; + bool unique_items = 16; + int64 max_properties = 17; + int64 min_properties = 18; + repeated string required = 19; + repeated Any enum = 20; + AdditionalPropertiesItem additional_properties = 21; + TypeItem type = 22; + ItemsItem items = 23; + repeated Schema all_of = 24; + Properties properties = 25; + string discriminator = 26; + bool read_only = 27; + Xml xml = 28; + ExternalDocs external_docs = 29; + Any example = 30; + repeated NamedAny vendor_extension = 31; +} + +message SchemaItem { + oneof oneof { + Schema schema = 1; + FileSchema file_schema = 2; + } +} + +message SecurityDefinitions { + repeated NamedSecurityDefinitionsItem additional_properties = 1; +} + +message SecurityDefinitionsItem { + oneof oneof { + BasicAuthenticationSecurity basic_authentication_security = 1; + ApiKeySecurity api_key_security = 2; + Oauth2ImplicitSecurity oauth2_implicit_security = 3; + Oauth2PasswordSecurity oauth2_password_security = 4; + Oauth2ApplicationSecurity oauth2_application_security = 5; + Oauth2AccessCodeSecurity oauth2_access_code_security = 6; + } +} + +message SecurityRequirement { + repeated NamedStringArray additional_properties = 1; +} + +message StringArray { + repeated string value = 1; +} + +message Tag { + string name = 1; + string description = 2; + ExternalDocs external_docs = 3; + repeated NamedAny vendor_extension = 4; +} + +message TypeItem { + repeated string value = 1; +} + +// Any property starting with x- is valid. 
+message VendorExtension { + repeated NamedAny additional_properties = 1; +} + +message Xml { + string name = 1; + string namespace = 2; + string prefix = 3; + bool attribute = 4; + bool wrapped = 5; + repeated NamedAny vendor_extension = 6; +} + diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md new file mode 100644 index 000000000000..836fb32a7ef0 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/README.md @@ -0,0 +1,16 @@ +# OpenAPI v2 Protocol Buffer Models + +This directory contains a Protocol Buffer-language model +and related code for supporting OpenAPI v2. + +Gnostic applications and plugins can use OpenAPIv2.proto +to generate Protocol Buffer support code for their preferred languages. + +OpenAPIv2.go is used by Gnostic to read JSON and YAML OpenAPI +descriptions into the Protocol Buffer-based datastructures +generated from OpenAPIv2.proto. + +OpenAPIv2.proto and OpenAPIv2.go are generated by the Gnostic +compiler generator, and OpenAPIv2.pb.go is generated by +protoc, the Protocol Buffer compiler, and protoc-gen-go, the +Protocol Buffer Go code generation plugin. diff --git a/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json new file mode 100644 index 000000000000..2815a26ea718 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/OpenAPIv2/openapi-2.0.json @@ -0,0 +1,1610 @@ +{ + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." 
+ }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." + }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." 
+ }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + 
"type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. 
GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + }, + "description": { + "type": "string" + } + } + } + } +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/README.md b/vendor/github.com/googleapis/gnostic/compiler/README.md new file mode 100644 index 000000000000..848b16c69b80 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/README.md @@ -0,0 +1,3 @@ +# Compiler support code + +This 
directory contains compiler support code used by Gnostic and Gnostic extensions. \ No newline at end of file diff --git a/vendor/github.com/googleapis/gnostic/compiler/context.go b/vendor/github.com/googleapis/gnostic/compiler/context.go new file mode 100644 index 000000000000..a64c1b75db40 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/context.go @@ -0,0 +1,43 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Context contains state of the compiler as it traverses a document. +type Context struct { + Parent *Context + Name string + ExtensionHandlers *[]ExtensionHandler +} + +// NewContextWithExtensions returns a new object representing the compiler state +func NewContextWithExtensions(name string, parent *Context, extensionHandlers *[]ExtensionHandler) *Context { + return &Context{Name: name, Parent: parent, ExtensionHandlers: extensionHandlers} +} + +// NewContext returns a new object representing the compiler state +func NewContext(name string, parent *Context) *Context { + if parent != nil { + return &Context{Name: name, Parent: parent, ExtensionHandlers: parent.ExtensionHandlers} + } + return &Context{Name: name, Parent: parent, ExtensionHandlers: nil} +} + +// Description returns a text description of the compiler state +func (context *Context) Description() string { + if context.Parent != nil { + return context.Parent.Description() + "." + context.Name + } + return context.Name +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/error.go b/vendor/github.com/googleapis/gnostic/compiler/error.go new file mode 100644 index 000000000000..d8672c1008b7 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/error.go @@ -0,0 +1,61 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +// Error represents compiler errors and their location in the document. +type Error struct { + Context *Context + Message string +} + +// NewError creates an Error. +func NewError(context *Context, message string) *Error { + return &Error{Context: context, Message: message} +} + +// Error returns the string value of an Error. +func (err *Error) Error() string { + if err.Context == nil { + return "ERROR " + err.Message + } + return "ERROR " + err.Context.Description() + " " + err.Message +} + +// ErrorGroup is a container for groups of Error values. 
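+// Editor's note: the example below is an illustrative sketch and is not part
+// of the upstream gnostic source. Callers typically collect per-field errors
+// and collapse them with NewErrorGroupOrNil, which returns nil for an empty
+// slice, the single error unchanged for a slice of one, and an *ErrorGroup
+// otherwise (its Error method joins the messages with newlines):
+//
+//    var errs []error
+//    errs = append(errs, NewError(ctx, "missing required property: title"))
+//    errs = append(errs, NewError(ctx, "missing required property: version"))
+//    if err := NewErrorGroupOrNil(errs); err != nil {
+//        return err // surfaces both messages, joined by newlines
+//    }
+//
+// Here ctx is assumed to be a *Context built with NewContext.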
+type ErrorGroup struct { + Errors []error +} + +// NewErrorGroupOrNil returns a new ErrorGroup for a slice of errors or nil if the slice is empty. +func NewErrorGroupOrNil(errors []error) error { + if len(errors) == 0 { + return nil + } else if len(errors) == 1 { + return errors[0] + } else { + return &ErrorGroup{Errors: errors} + } +} + +func (group *ErrorGroup) Error() string { + result := "" + for i, err := range group.Errors { + if i > 0 { + result += "\n" + } + result += err.Error() + } + return result +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go new file mode 100644 index 000000000000..1f85b650e817 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/extension-handler.go @@ -0,0 +1,101 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "bytes" + "fmt" + "os/exec" + + "strings" + + "errors" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes/any" + ext_plugin "github.com/googleapis/gnostic/extensions" + yaml "gopkg.in/yaml.v2" +) + +// ExtensionHandler describes a binary that is called by the compiler to handle specification extensions. +type ExtensionHandler struct { + Name string +} + +// HandleExtension calls a binary extension handler. 
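+// Editor's note: illustrative usage only; not part of the upstream source.
+// The function walks the ExtensionHandlers attached to the Context in order
+// and returns the first non-nil result produced by a handler binary:
+//
+//    handled, value, err := HandleExtension(ctx, yamlValue, "x-sample")
+//    if handled && err == nil {
+//        _ = value // a *any.Any encoded by the external handler
+//    }
+//
+// Here ctx, yamlValue, and the "x-sample" extension name are placeholders.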
+func HandleExtension(context *Context, in interface{}, extensionName string) (bool, *any.Any, error) { + handled := false + var errFromPlugin error + var outFromPlugin *any.Any + + if context != nil && context.ExtensionHandlers != nil && len(*(context.ExtensionHandlers)) != 0 { + for _, customAnyProtoGenerator := range *(context.ExtensionHandlers) { + outFromPlugin, errFromPlugin = customAnyProtoGenerator.handle(in, extensionName) + if outFromPlugin == nil { + continue + } else { + handled = true + break + } + } + } + return handled, outFromPlugin, errFromPlugin +} + +func (extensionHandlers *ExtensionHandler) handle(in interface{}, extensionName string) (*any.Any, error) { + if extensionHandlers.Name != "" { + binary, _ := yaml.Marshal(in) + + request := &ext_plugin.ExtensionHandlerRequest{} + + version := &ext_plugin.Version{} + version.Major = 0 + version.Minor = 1 + version.Patch = 0 + request.CompilerVersion = version + + request.Wrapper = &ext_plugin.Wrapper{} + + request.Wrapper.Version = "v2" + request.Wrapper.Yaml = string(binary) + request.Wrapper.ExtensionName = extensionName + + requestBytes, _ := proto.Marshal(request) + cmd := exec.Command(extensionHandlers.Name) + cmd.Stdin = bytes.NewReader(requestBytes) + output, err := cmd.Output() + + if err != nil { + fmt.Printf("Error: %+v\n", err) + return nil, err + } + response := &ext_plugin.ExtensionHandlerResponse{} + err = proto.Unmarshal(output, response) + if err != nil { + fmt.Printf("Error: %+v\n", err) + fmt.Printf("%s\n", string(output)) + return nil, err + } + if !response.Handled { + return nil, nil + } + if len(response.Error) != 0 { + message := fmt.Sprintf("Errors when parsing: %+v for field %s by vendor extension handler %s. Details %+v", in, extensionName, extensionHandlers.Name, strings.Join(response.Error, ",")) + return nil, errors.New(message) + } + return response.Value, nil + } + return nil, nil +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/helpers.go b/vendor/github.com/googleapis/gnostic/compiler/helpers.go new file mode 100644 index 000000000000..76df635ff9bf --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/helpers.go @@ -0,0 +1,197 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "fmt" + "gopkg.in/yaml.v2" + "regexp" + "sort" + "strconv" +) + +// compiler helper functions, usually called from generated code + +// UnpackMap gets a yaml.MapSlice if possible. +func UnpackMap(in interface{}) (yaml.MapSlice, bool) { + m, ok := in.(yaml.MapSlice) + if ok { + return m, true + } + // do we have an empty array? + a, ok := in.([]interface{}) + if ok && len(a) == 0 { + // if so, return an empty map + return yaml.MapSlice{}, true + } + return nil, false +} + +// SortedKeysForMap returns the sorted keys of a yaml.MapSlice. 
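+// Editor's note: the snippet below is an illustrative sketch, not upstream
+// code. The map helpers in this file are usually chained when walking a
+// parsed document, assuming node holds a value produced by yaml.Unmarshal:
+//
+//    if m, ok := UnpackMap(node); ok {
+//        for _, key := range SortedKeysForMap(m) {
+//            value := MapValueForKey(m, key)
+//            _ = value // compile or validate the value here
+//        }
+//    }
+//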
+func SortedKeysForMap(m yaml.MapSlice) []string { + keys := make([]string, 0) + for _, item := range m { + keys = append(keys, item.Key.(string)) + } + sort.Strings(keys) + return keys +} + +// MapHasKey returns true if a yaml.MapSlice contains a specified key. +func MapHasKey(m yaml.MapSlice, key string) bool { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return true + } + } + return false +} + +// MapValueForKey gets the value of a map value for a specified key. +func MapValueForKey(m yaml.MapSlice, key string) interface{} { + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok && key == itemKey { + return item.Value + } + } + return nil +} + +// ConvertInterfaceArrayToStringArray converts an array of interfaces to an array of strings, if possible. +func ConvertInterfaceArrayToStringArray(interfaceArray []interface{}) []string { + stringArray := make([]string, 0) + for _, item := range interfaceArray { + v, ok := item.(string) + if ok { + stringArray = append(stringArray, v) + } + } + return stringArray +} + +// MissingKeysInMap identifies which keys from a list of required keys are not in a map. +func MissingKeysInMap(m yaml.MapSlice, requiredKeys []string) []string { + missingKeys := make([]string, 0) + for _, k := range requiredKeys { + if !MapHasKey(m, k) { + missingKeys = append(missingKeys, k) + } + } + return missingKeys +} + +// InvalidKeysInMap returns keys in a map that don't match a list of allowed keys and patterns. +func InvalidKeysInMap(m yaml.MapSlice, allowedKeys []string, allowedPatterns []*regexp.Regexp) []string { + invalidKeys := make([]string, 0) + for _, item := range m { + itemKey, ok := item.Key.(string) + if ok { + key := itemKey + found := false + // does the key match an allowed key? + for _, allowedKey := range allowedKeys { + if key == allowedKey { + found = true + break + } + } + if !found { + // does the key match an allowed pattern? + for _, allowedPattern := range allowedPatterns { + if allowedPattern.MatchString(key) { + found = true + break + } + } + if !found { + invalidKeys = append(invalidKeys, key) + } + } + } + } + return invalidKeys +} + +// DescribeMap describes a map (for debugging purposes). +func DescribeMap(in interface{}, indent string) string { + description := "" + m, ok := in.(map[string]interface{}) + if ok { + keys := make([]string, 0) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + v := m[k] + description += fmt.Sprintf("%s%s:\n", indent, k) + description += DescribeMap(v, indent+" ") + } + return description + } + a, ok := in.([]interface{}) + if ok { + for i, v := range a { + description += fmt.Sprintf("%s%d:\n", indent, i) + description += DescribeMap(v, indent+" ") + } + return description + } + description += fmt.Sprintf("%s%+v\n", indent, in) + return description +} + +// PluralProperties returns the string "properties" pluralized. +func PluralProperties(count int) string { + if count == 1 { + return "property" + } + return "properties" +} + +// StringArrayContainsValue returns true if a string array contains a specified value. +func StringArrayContainsValue(array []string, value string) bool { + for _, item := range array { + if item == value { + return true + } + } + return false +} + +// StringArrayContainsValues returns true if a string array contains all of a list of specified values. 
+func StringArrayContainsValues(array []string, values []string) bool { + for _, value := range values { + if !StringArrayContainsValue(array, value) { + return false + } + } + return true +} + +// StringValue returns the string value of an item. +func StringValue(item interface{}) (value string, ok bool) { + value, ok = item.(string) + if ok { + return value, ok + } + intValue, ok := item.(int) + if ok { + return strconv.Itoa(intValue), true + } + return "", false +} diff --git a/vendor/github.com/googleapis/gnostic/compiler/main.go b/vendor/github.com/googleapis/gnostic/compiler/main.go new file mode 100644 index 000000000000..9713a21cca26 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/main.go @@ -0,0 +1,16 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package compiler provides support functions to generated compiler code. +package compiler diff --git a/vendor/github.com/googleapis/gnostic/compiler/reader.go b/vendor/github.com/googleapis/gnostic/compiler/reader.go new file mode 100644 index 000000000000..c954a2d9b24d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/compiler/reader.go @@ -0,0 +1,175 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package compiler + +import ( + "errors" + "fmt" + "gopkg.in/yaml.v2" + "io/ioutil" + "log" + "net/http" + "net/url" + "path/filepath" + "strings" +) + +var fileCache map[string][]byte +var infoCache map[string]interface{} +var count int64 + +var verboseReader = false + +func initializeFileCache() { + if fileCache == nil { + fileCache = make(map[string][]byte, 0) + } +} + +func initializeInfoCache() { + if infoCache == nil { + infoCache = make(map[string]interface{}, 0) + } +} + +// FetchFile gets a specified file from the local filesystem or a remote location. 
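+// Editor's note: illustrative sketch; not part of the upstream source.
+// Successful responses are memoized in the package-level fileCache, so
+// resolving many $refs against the same URL performs only one HTTP request:
+//
+//    data, err := FetchFile("https://example.com/openapi.yaml") // placeholder URL
+//    if err == nil {
+//        info, _ := ReadInfoFromBytes("https://example.com/openapi.yaml", data)
+//        _ = info // an interface{} holding a yaml.MapSlice view of the document
+//    }
+//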
+func FetchFile(fileurl string) ([]byte, error) { + initializeFileCache() + bytes, ok := fileCache[fileurl] + if ok { + if verboseReader { + log.Printf("Cache hit %s", fileurl) + } + return bytes, nil + } + if verboseReader { + log.Printf("Fetching %s", fileurl) + } + response, err := http.Get(fileurl) + if err != nil { + return nil, err + } + if response.StatusCode != 200 { + return nil, errors.New(fmt.Sprintf("Error downloading %s: %s", fileurl, response.Status)) + } + defer response.Body.Close() + bytes, err = ioutil.ReadAll(response.Body) + if err == nil { + fileCache[fileurl] = bytes + } + return bytes, err +} + +// ReadBytesForFile reads the bytes of a file. +func ReadBytesForFile(filename string) ([]byte, error) { + // is the filename a url? + fileurl, _ := url.Parse(filename) + if fileurl.Scheme != "" { + // yes, fetch it + bytes, err := FetchFile(filename) + if err != nil { + return nil, err + } + return bytes, nil + } + // no, it's a local filename + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + return bytes, nil +} + +// ReadInfoFromBytes unmarshals a file as a yaml.MapSlice. +func ReadInfoFromBytes(filename string, bytes []byte) (interface{}, error) { + initializeInfoCache() + cachedInfo, ok := infoCache[filename] + if ok { + if verboseReader { + log.Printf("Cache hit info for file %s", filename) + } + return cachedInfo, nil + } + if verboseReader { + log.Printf("Reading info for file %s", filename) + } + var info yaml.MapSlice + err := yaml.Unmarshal(bytes, &info) + if err != nil { + return nil, err + } + if len(filename) > 0 { + infoCache[filename] = info + } + return info, nil +} + +// ReadInfoForRef reads a file and return the fragment needed to resolve a $ref. +func ReadInfoForRef(basefile string, ref string) (interface{}, error) { + initializeInfoCache() + { + info, ok := infoCache[ref] + if ok { + if verboseReader { + log.Printf("Cache hit for ref %s#%s", basefile, ref) + } + return info, nil + } + } + if verboseReader { + log.Printf("Reading info for ref %s#%s", basefile, ref) + } + count = count + 1 + basedir, _ := filepath.Split(basefile) + parts := strings.Split(ref, "#") + var filename string + if parts[0] != "" { + filename = basedir + parts[0] + } else { + filename = basefile + } + bytes, err := ReadBytesForFile(filename) + if err != nil { + return nil, err + } + info, err := ReadInfoFromBytes(filename, bytes) + if err != nil { + log.Printf("File error: %v\n", err) + } else { + if len(parts) > 1 { + path := strings.Split(parts[1], "/") + for i, key := range path { + if i > 0 { + m, ok := info.(yaml.MapSlice) + if ok { + found := false + for _, section := range m { + if section.Key == key { + info = section.Value + found = true + } + } + if !found { + infoCache[ref] = nil + return nil, NewError(nil, fmt.Sprintf("could not resolve %s", ref)) + } + } + } + } + } + } + infoCache[ref] = info + return info, nil +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh new file mode 100755 index 000000000000..68d02a02ac5d --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/COMPILE-EXTENSION.sh @@ -0,0 +1,5 @@ +go get github.com/golang/protobuf/protoc-gen-go + +protoc \ +--go_out=Mgoogle/protobuf/any.proto=github.com/golang/protobuf/ptypes/any:. 
*.proto + diff --git a/vendor/github.com/googleapis/gnostic/extensions/README.md b/vendor/github.com/googleapis/gnostic/extensions/README.md new file mode 100644 index 000000000000..ff1c2eb1e3ad --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/README.md @@ -0,0 +1,5 @@ +# Extensions + +This directory contains support code for building Gnostic extensions and associated examples. + +Extensions are used to compile vendor or specification extensions into protocol buffer structures. diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go new file mode 100644 index 000000000000..749ff784166b --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.pb.go @@ -0,0 +1,218 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: extension.proto + +/* +Package openapiextension_v1 is a generated protocol buffer package. + +It is generated from these files: + extension.proto + +It has these top-level messages: + Version + ExtensionHandlerRequest + ExtensionHandlerResponse + Wrapper +*/ +package openapiextension_v1 + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/golang/protobuf/ptypes/any" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The version number of OpenAPI compiler. +type Version struct { + Major int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + Suffix string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil { + return m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil { + return m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil { + return m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil { + return m.Suffix + } + return "" +} + +// An encoded Request is written to the ExtensionHandler's stdin. +type ExtensionHandlerRequest struct { + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to gnostic. + Wrapper *Wrapper `protobuf:"bytes,1,opt,name=wrapper" json:"wrapper,omitempty"` + // The version number of openapi compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` +} + +func (m *ExtensionHandlerRequest) Reset() { *m = ExtensionHandlerRequest{} } +func (m *ExtensionHandlerRequest) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerRequest) ProtoMessage() {} +func (*ExtensionHandlerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ExtensionHandlerRequest) GetWrapper() *Wrapper { + if m != nil { + return m.Wrapper + } + return nil +} + +func (m *ExtensionHandlerRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +type ExtensionHandlerResponse struct { + // true if the extension is handled by the extension handler; false otherwise + Handled bool `protobuf:"varint,1,opt,name=handled" json:"handled,omitempty"` + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. + Error []string `protobuf:"bytes,2,rep,name=error" json:"error,omitempty"` + // text output + Value *google_protobuf.Any `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"` +} + +func (m *ExtensionHandlerResponse) Reset() { *m = ExtensionHandlerResponse{} } +func (m *ExtensionHandlerResponse) String() string { return proto.CompactTextString(m) } +func (*ExtensionHandlerResponse) ProtoMessage() {} +func (*ExtensionHandlerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *ExtensionHandlerResponse) GetHandled() bool { + if m != nil { + return m.Handled + } + return false +} + +func (m *ExtensionHandlerResponse) GetError() []string { + if m != nil { + return m.Error + } + return nil +} + +func (m *ExtensionHandlerResponse) GetValue() *google_protobuf.Any { + if m != nil { + return m.Value + } + return nil +} + +type Wrapper struct { + // version of the OpenAPI specification in which this extension was written. 
+ Version string `protobuf:"bytes,1,opt,name=version" json:"version,omitempty"` + // Name of the extension + ExtensionName string `protobuf:"bytes,2,opt,name=extension_name,json=extensionName" json:"extension_name,omitempty"` + // Must be a valid yaml for the proto + Yaml string `protobuf:"bytes,3,opt,name=yaml" json:"yaml,omitempty"` +} + +func (m *Wrapper) Reset() { *m = Wrapper{} } +func (m *Wrapper) String() string { return proto.CompactTextString(m) } +func (*Wrapper) ProtoMessage() {} +func (*Wrapper) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Wrapper) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Wrapper) GetExtensionName() string { + if m != nil { + return m.ExtensionName + } + return "" +} + +func (m *Wrapper) GetYaml() string { + if m != nil { + return m.Yaml + } + return "" +} + +func init() { + proto.RegisterType((*Version)(nil), "openapiextension.v1.Version") + proto.RegisterType((*ExtensionHandlerRequest)(nil), "openapiextension.v1.ExtensionHandlerRequest") + proto.RegisterType((*ExtensionHandlerResponse)(nil), "openapiextension.v1.ExtensionHandlerResponse") + proto.RegisterType((*Wrapper)(nil), "openapiextension.v1.Wrapper") +} + +func init() { proto.RegisterFile("extension.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 357 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x4d, 0x4b, 0xc3, 0x40, + 0x18, 0x84, 0x49, 0xbf, 0x62, 0x56, 0x6c, 0x65, 0x2d, 0x1a, 0xc5, 0x43, 0x09, 0x08, 0x45, 0x64, + 0x4b, 0x15, 0xbc, 0xb7, 0x50, 0xd4, 0x8b, 0x2d, 0x7b, 0xa8, 0x37, 0xcb, 0x36, 0x7d, 0x9b, 0x46, + 0x92, 0xdd, 0x75, 0xf3, 0x61, 0xfb, 0x57, 0x3c, 0xfa, 0x4b, 0x25, 0xbb, 0x49, 0x3d, 0xa8, 0xb7, + 0xcc, 0xc3, 0x24, 0xef, 0xcc, 0x04, 0x75, 0x60, 0x9b, 0x02, 0x4f, 0x42, 0xc1, 0x89, 0x54, 0x22, + 0x15, 0xf8, 0x44, 0x48, 0xe0, 0x4c, 0x86, 0x3f, 0x3c, 0x1f, 0x5e, 0x9c, 0x07, 0x42, 0x04, 0x11, + 0x0c, 0xb4, 0x65, 0x99, 0xad, 0x07, 0x8c, 0xef, 0x8c, 0xdf, 0xf3, 0x91, 0x3d, 0x07, 0x55, 0x18, + 0x71, 0x17, 0x35, 0x63, 0xf6, 0x26, 0x94, 0x6b, 0xf5, 0xac, 0x7e, 0x93, 0x1a, 0xa1, 0x69, 0xc8, + 0x85, 0x72, 0x6b, 0x25, 0x2d, 0x44, 0x41, 0x25, 0x4b, 0xfd, 0x8d, 0x5b, 0x37, 0x54, 0x0b, 0x7c, + 0x8a, 0x5a, 0x49, 0xb6, 0x5e, 0x87, 0x5b, 0xb7, 0xd1, 0xb3, 0xfa, 0x0e, 0x2d, 0x95, 0xf7, 0x69, + 0xa1, 0xb3, 0x49, 0x15, 0xe8, 0x91, 0xf1, 0x55, 0x04, 0x8a, 0xc2, 0x7b, 0x06, 0x49, 0x8a, 0xef, + 0x91, 0xfd, 0xa1, 0x98, 0x94, 0x60, 0xee, 0x1e, 0xde, 0x5e, 0x92, 0x3f, 0x2a, 0x90, 0x17, 0xe3, + 0xa1, 0x95, 0x19, 0x3f, 0xa0, 0x63, 0x5f, 0xc4, 0x32, 0x8c, 0x40, 0x2d, 0x72, 0xd3, 0x40, 0x87, + 0xf9, 0xef, 0x03, 0x65, 0x4b, 0xda, 0xa9, 0xde, 0x2a, 0x81, 0x97, 0x23, 0xf7, 0x77, 0xb6, 0x44, + 0x0a, 0x9e, 0x00, 0x76, 0x91, 0xbd, 0xd1, 0x68, 0xa5, 0xc3, 0x1d, 0xd0, 0x4a, 0x16, 0x03, 0x80, + 0x52, 0x7a, 0x96, 0x7a, 0xdf, 0xa1, 0x46, 0xe0, 0x6b, 0xd4, 0xcc, 0x59, 0x94, 0x41, 0x99, 0xa4, + 0x4b, 0xcc, 0xf0, 0xa4, 0x1a, 0x9e, 0x8c, 0xf8, 0x8e, 0x1a, 0x8b, 0xf7, 0x8a, 0xec, 0xb2, 0x54, + 0x71, 0xa6, 0xaa, 0x60, 0xe9, 0xe1, 0x2a, 0x89, 0xaf, 0x50, 0x7b, 0xdf, 0x62, 0xc1, 0x59, 0x0c, + 0xfa, 0x37, 0x38, 0xf4, 0x68, 0x4f, 0x9f, 0x59, 0x0c, 0x18, 0xa3, 0xc6, 0x8e, 0xc5, 0x91, 0x3e, + 0xeb, 0x50, 0xfd, 0x3c, 0xbe, 0x41, 0x6d, 0xa1, 0x02, 0x12, 0x70, 0x91, 0xa4, 0xa1, 0x4f, 0xf2, + 0xe1, 0x18, 0x4f, 0x25, 0xf0, 0xd1, 0xec, 0x69, 0x5f, 0x77, 0x3e, 0x9c, 0x59, 0x5f, 0xb5, 0xfa, + 0x74, 0x34, 0x59, 0xb6, 0x74, 0xc4, 0xbb, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0x5c, 0x6b, + 
0x80, 0x51, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extension.proto b/vendor/github.com/googleapis/gnostic/extensions/extension.proto new file mode 100644 index 000000000000..04856f913b17 --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extension.proto @@ -0,0 +1,93 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +import "google/protobuf/any.proto"; +package openapiextension.v1; + +// This option lets the proto compiler generate Java code inside the package +// name (see below) instead of inside an outer class. It creates a simpler +// developer experience by reducing one-level of name nesting and be +// consistent with most programming languages that don't support outer classes. +option java_multiple_files = true; + +// The Java outer classname should be the filename in UpperCamelCase. This +// class is only used to hold proto descriptor, so developers don't need to +// work with it directly. +option java_outer_classname = "OpenAPIExtensionV1"; + +// The Java package name must be proto package name with proper prefix. +option java_package = "org.gnostic.v1"; + +// A reasonable prefix for the Objective-C symbols generated from the package. +// It should at a minimum be 3 characters long, all uppercase, and convention +// is to use an abbreviation of the package name. Something short, but +// hopefully unique enough to not conflict with things that may come along in +// the future. 'GPB' is reserved for the protocol buffer implementation itself. +// +option objc_class_prefix = "OAE"; // "OpenAPI Extension" + +// The version number of OpenAPI compiler. +message Version { + int32 major = 1; + int32 minor = 2; + int32 patch = 3; + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. + string suffix = 4; +} + +// An encoded Request is written to the ExtensionHandler's stdin. +message ExtensionHandlerRequest { + + // The OpenAPI descriptions that were explicitly listed on the command line. + // The specifications will appear in the order they are specified to gnostic. + Wrapper wrapper = 1; + + // The version number of openapi compiler. + Version compiler_version = 3; +} + +// The extensions writes an encoded ExtensionHandlerResponse to stdout. +message ExtensionHandlerResponse { + + // true if the extension is handled by the extension handler; false otherwise + bool handled = 1; + + // Error message. If non-empty, the extension handling failed. + // The extension handler process should exit with status code zero + // even if it reports an error in this way. + // + // This should be used to indicate errors which prevent the extension from + // operating as intended. Errors which indicate a problem in gnostic + // itself -- such as the input Document being unparseable -- should be + // reported by writing a message to stderr and exiting with a non-zero + // status code. 
+ repeated string error = 2; + + // text output + google.protobuf.Any value = 3; +} + +message Wrapper { + // version of the OpenAPI specification in which this extension was written. + string version = 1; + + // Name of the extension + string extension_name = 2; + + // Must be a valid yaml for the proto + string yaml = 3; +} diff --git a/vendor/github.com/googleapis/gnostic/extensions/extensions.go b/vendor/github.com/googleapis/gnostic/extensions/extensions.go new file mode 100644 index 000000000000..94a8e62a776e --- /dev/null +++ b/vendor/github.com/googleapis/gnostic/extensions/extensions.go @@ -0,0 +1,82 @@ +// Copyright 2017 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package openapiextension_v1 + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/ptypes" +) + +type documentHandler func(version string, extensionName string, document string) +type extensionHandler func(name string, yamlInput string) (bool, proto.Message, error) + +func forInputYamlFromOpenapic(handler documentHandler) { + data, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println("File error:", err.Error()) + os.Exit(1) + } + if len(data) == 0 { + fmt.Println("No input data.") + os.Exit(1) + } + request := &ExtensionHandlerRequest{} + err = proto.Unmarshal(data, request) + if err != nil { + fmt.Println("Input error:", err.Error()) + os.Exit(1) + } + handler(request.Wrapper.Version, request.Wrapper.ExtensionName, request.Wrapper.Yaml) +} + +// ProcessExtension calles the handler for a specified extension. 
+func ProcessExtension(handleExtension extensionHandler) { + response := &ExtensionHandlerResponse{} + forInputYamlFromOpenapic( + func(version string, extensionName string, yamlInput string) { + var newObject proto.Message + var err error + + handled, newObject, err := handleExtension(extensionName, yamlInput) + if !handled { + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + + // If we reach here, then the extension is handled + response.Handled = true + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + response.Value, err = ptypes.MarshalAny(newObject) + if err != nil { + response.Error = append(response.Error, err.Error()) + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) + os.Exit(0) + } + }) + + responseBytes, _ := proto.Marshal(response) + os.Stdout.Write(responseBytes) +} diff --git a/vendor/github.com/gophercloud/gophercloud/.github/CONTRIBUTING.md b/vendor/github.com/gophercloud/gophercloud/.github/CONTRIBUTING.md new file mode 100644 index 000000000000..af0bdb979a8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.github/CONTRIBUTING.md @@ -0,0 +1,256 @@ +# Contributing to Gophercloud + +- [New Contributor Tutorial](#new-contributor-tutorial) +- [3 ways to get involved](#3-ways-to-get-involved) +- [Getting started](#getting-started) +- [Tests](#tests) +- [Style guide](#basic-style-guide) + +## New Contributor Tutorial + +For new contributors, we've put together a detailed tutorial +[here](https://github.com/gophercloud/gophercloud/tree/master/docs/contributor-tutorial)! + +## 3 ways to get involved + +There are three main ways you can get involved in our open-source project, and +each is described briefly below. + +### 1. Fixing bugs + +If you want to start fixing open bugs, we'd really appreciate that! Bug fixing +is central to any project. The best way to get started is by heading to our +[bug tracker](https://github.com/gophercloud/gophercloud/issues) and finding open +bugs that you think nobody is working on. It might be useful to comment on the +thread to see the current state of the issue and if anybody has made any +breakthroughs on it so far. + +### 2. Improving documentation + +Gophercloud's documentation is automatically generated from the source code +and can be read online at [godoc.org](https://godoc.org/github.com/gophercloud/gophercloud). + +If you feel that a certain section could be improved - whether it's to clarify +ambiguity, correct a technical mistake, or to fix a grammatical error - please +feel entitled to do so! We welcome doc pull requests with the same childlike +enthusiasm as any other contribution! + +### 3. Working on a new feature + +If you've found something we've left out, we'd love for you to add it! Please +first open an issue to indicate your interest to a core contributor - this +enables quick/early feedback and can help steer you in the right direction by +avoiding known issues. It might also help you avoid losing time implementing +something that might not ever work or is outside the scope of the project. + +While you're implementing the feature, one tip is to prefix your Pull Request +title with `[wip]` - then people know it's a work in progress. Once the PR is +ready for review, you can remove the `[wip]` tag and request a review. 
+ +We ask that you do not submit a feature that you have not spent time researching +and testing first-hand in an actual OpenStack environment. While we appreciate +the contribution, submitting code which you are unfamiliar with is a risk to the +users who will ultimately use it. See our [acceptance tests readme](/acceptance) +for information about how you can create a local development environment to +better understand the feature you're working on. + +Please do not hesitate to ask questions or request clarification. Your +contribution is very much appreciated and we are happy to work with you to get +it merged. + +## Getting Started + +As a contributor you will need to setup your workspace in a slightly different +way than just downloading it. Here are the basic instructions: + +1. Configure your `$GOPATH` and run `go get` as described in the main +[README](/README.md#how-to-install) but add `-tags "fixtures acceptance"` to +get dependencies for unit and acceptance tests. + + ```bash + go get -tags "fixtures acceptance" github.com/gophercloud/gophercloud + ``` + +2. Move into the directory that houses your local repository: + + ```bash + cd ${GOPATH}/src/github.com/gophercloud/gophercloud + ``` + +3. Fork the `gophercloud/gophercloud` repository and update your remote refs. You +will need to rename the `origin` remote branch to `upstream`, and add your +fork as `origin` instead: + + ```bash + git remote rename origin upstream + git remote add origin git@github.com:/gophercloud.git + ``` + +4. Checkout the latest development branch: + + ```bash + git checkout master + ``` + +5. If you're working on something (discussed more in detail below), you will +need to checkout a new feature branch: + + ```bash + git checkout -b my-new-feature + ``` + +6. Use a standard text editor or IDE of your choice to make your changes to the code or documentation. Once finished, commit them. + + ```bash + git status + git add path/to/changed/file.go + git commit + ``` + +7. Submit your branch as a [Pull Request](https://help.github.com/articles/creating-a-pull-request/). When submitting a Pull Request, please follow our [Style Guide](https://github.com/gophercloud/gophercloud/blob/master/docs/STYLEGUIDE.md). + +> Further information about using Git can be found [here](https://git-scm.com/book/en/v2). + +Happy Hacking! + +## Tests + +When working on a new or existing feature, testing will be the backbone of your +work since it helps uncover and prevent regressions in the codebase. There are +two types of test we use in Gophercloud: unit tests and acceptance tests, which +are both described below. + +### Unit tests + +Unit tests are the fine-grained tests that establish and ensure the behavior +of individual units of functionality. We usually test on an +operation-by-operation basis (an operation typically being an API action) with +the use of mocking to set up explicit expectations. Each operation will set up +its HTTP response expectation, and then test how the system responds when fed +this controlled, pre-determined input. 
+ +To make life easier, we've introduced a bunch of test helpers to simplify the +process of testing expectations with assertions: + +```go +import ( + "testing" + + "github.com/gophercloud/gophercloud/testhelper" +) + +func TestSomething(t *testing.T) { + result, err := Operation() + + testhelper.AssertEquals(t, "foo", result.Bar) + testhelper.AssertNoErr(t, err) +} + +func TestSomethingElse(t *testing.T) { + testhelper.CheckEquals(t, "expected", "actual") +} +``` + +`AssertEquals` and `AssertNoErr` will throw a fatal error if a value does not +match an expected value or if an error has been declared, respectively. You can +also use `CheckEquals` and `CheckNoErr` for the same purpose; the only difference +being that `t.Errorf` is raised rather than `t.Fatalf`. + +Here is a truncated example of mocked HTTP responses: + +```go +import ( + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +func TestGet(t *testing.T) { + // Setup the HTTP request multiplexer and server + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + // Test we're using the correct HTTP method + th.TestMethod(t, r, "GET") + + // Test we're setting the auth token + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + // Set the appropriate headers for our mocked response + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + // Set the HTTP body + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "name": "private-network", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) + + // Call our API operation + network, err := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + + // Assert no errors and equality + th.AssertNoErr(t, err) + th.AssertEquals(t, n.Status, "ACTIVE") +} +``` + +### Acceptance tests + +As we've already mentioned, unit tests have a very narrow and confined focus - +they test small units of behavior. Acceptance tests on the other hand have a +far larger scope: they are fully functional tests that test the entire API of a +service in one fell swoop. They don't care about unit isolation or mocking +expectations, they instead do a full run-through and consequently test how the +entire system _integrates_ together. When an API satisfies expectations, it +proves by default that the requirements for a contract have been met. + +Please be aware that acceptance tests will hit a live API - and may incur +service charges from your provider. Although most tests handle their own +teardown procedures, it is always worth manually checking that resources are +deleted after the test suite finishes. + +We provide detailed information about how to set up local acceptance test +environments in our [acceptance tests readme](/acceptance). + +### Running tests + +To run all tests: + + ```bash + go test -tags fixtures ./... + ``` + +To run all tests with verbose output: + + ```bash + go test -v -tags fixtures ./... + ``` + +To run tests that match certain [build tags](): + + ```bash + go test -tags "fixtures foo bar" ./... + ``` + +To run tests for a particular sub-package: + + ```bash + cd ./path/to/package && go test -tags fixtures ./... 
+ ``` + +## Style guide + +See [here](/docs/STYLEGUIDE.md) diff --git a/vendor/github.com/gophercloud/gophercloud/.github/ISSUE_TEMPLATE b/vendor/github.com/gophercloud/gophercloud/.github/ISSUE_TEMPLATE new file mode 100644 index 000000000000..2c3be773ad01 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.github/ISSUE_TEMPLATE @@ -0,0 +1 @@ +Before starting a PR, please read our [contributor tutorial](https://github.com/gophercloud/gophercloud/tree/master/docs/contributor-tutorial). diff --git a/vendor/github.com/gophercloud/gophercloud/.github/PULL_REQUEST_TEMPLATE b/vendor/github.com/gophercloud/gophercloud/.github/PULL_REQUEST_TEMPLATE new file mode 100644 index 000000000000..ea3abc8f88fe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.github/PULL_REQUEST_TEMPLATE @@ -0,0 +1,12 @@ +Prior to starting a PR, please make sure you have read our +[contributor tutorial](https://github.com/gophercloud/gophercloud/tree/master/docs/contributor-tutorial). + +Prior to a PR being reviewed, there needs to be a Github issue that the PR +addresses. Replace the brackets and text below with that issue number. + +For #[PUT ISSUE NUMBER HERE] + +Links to the line numbers/files in the OpenStack source code that support the +code in this PR: + +[PUT URLS HERE] diff --git a/vendor/github.com/gophercloud/gophercloud/.gitignore b/vendor/github.com/gophercloud/gophercloud/.gitignore new file mode 100644 index 000000000000..dd91ed205597 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.gitignore @@ -0,0 +1,3 @@ +**/*.swp +.idea +.vscode diff --git a/vendor/github.com/gophercloud/gophercloud/.travis.yml b/vendor/github.com/gophercloud/gophercloud/.travis.yml new file mode 100644 index 000000000000..02728f496825 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.travis.yml @@ -0,0 +1,21 @@ +language: go +sudo: false +install: +- go get golang.org/x/crypto/ssh +- go get -v -tags 'fixtures acceptance' ./... +- go get github.com/wadey/gocovmerge +- go get github.com/mattn/goveralls +- go get golang.org/x/tools/cmd/goimports +go: +- "1.10" +- "tip" +env: + global: + - secure: "xSQsAG5wlL9emjbCdxzz/hYQsSpJ/bABO1kkbwMSISVcJ3Nk0u4ywF+LS4bgeOnwPfmFvNTOqVDu3RwEvMeWXSI76t1piCPcObutb2faKLVD/hLoAS76gYX+Z8yGWGHrSB7Do5vTPj1ERe2UljdrnsSeOXzoDwFxYRaZLX4bBOB4AyoGvRniil5QXPATiA1tsWX1VMicj8a4F8X+xeESzjt1Q5Iy31e7vkptu71bhvXCaoo5QhYwT+pLR9dN0S1b7Ro0KVvkRefmr1lUOSYd2e74h6Lc34tC1h3uYZCS4h47t7v5cOXvMNxinEj2C51RvbjvZI1RLVdkuAEJD1Iz4+Ote46nXbZ//6XRZMZz/YxQ13l7ux1PFjgEB6HAapmF5Xd8PRsgeTU9LRJxpiTJ3P5QJ3leS1va8qnziM5kYipj/Rn+V8g2ad/rgkRox9LSiR9VYZD2Pe45YCb1mTKSl2aIJnV7nkOqsShY5LNB4JZSg7xIffA+9YVDktw8dJlATjZqt7WvJJ49g6A61mIUV4C15q2JPGKTkZzDiG81NtmS7hFa7k0yaE2ELgYocbcuyUcAahhxntYTC0i23nJmEHVNiZmBO3u7EgpWe4KGVfumU+lt12tIn5b3dZRBBUk3QakKKozSK1QPHGpk/AZGrhu7H6l8to6IICKWtDcyMPQ=" +before_script: +- go vet ./... 
+script: +- ./script/coverage +- ./script/format +after_success: +- $HOME/gopath/bin/goveralls -service=travis-ci -coverprofile=cover.out diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml new file mode 100644 index 000000000000..92fd9d482684 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.zuul.yaml @@ -0,0 +1,86 @@ +- job: + name: gophercloud-unittest + parent: golang-test + description: | + Run gophercloud unit test + run: .zuul/playbooks/gophercloud-unittest/run.yaml + nodeset: ubuntu-xenial-ut + +- job: + name: gophercloud-acceptance-test + parent: golang-test + description: | + Run gophercloud acceptance test on master branch + run: .zuul/playbooks/gophercloud-acceptance-test/run.yaml + +- job: + name: gophercloud-acceptance-test-queens + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on queens branch + vars: + global_env: + OS_BRANCH: stable/queens + +- job: + name: gophercloud-acceptance-test-pike + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on pike branch + vars: + global_env: + OS_BRANCH: stable/pike + +- job: + name: gophercloud-acceptance-test-ocata + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on ocata branch + vars: + global_env: + OS_BRANCH: stable/ocata + +- job: + name: gophercloud-acceptance-test-newton + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on newton branch + vars: + global_env: + OS_BRANCH: stable/newton + +- job: + name: gophercloud-acceptance-test-mitaka + parent: gophercloud-acceptance-test + description: | + Run gophercloud acceptance test on mitaka branch + vars: + global_env: + OS_BRANCH: stable/mitaka + nodeset: ubuntu-trusty + +- project: + name: gophercloud/gophercloud + check: + jobs: + - gophercloud-unittest + - gophercloud-acceptance-test + recheck-mitaka: + jobs: + - gophercloud-acceptance-test-mitaka + recheck-newton: + jobs: + - gophercloud-acceptance-test-newton + recheck-ocata: + jobs: + - gophercloud-acceptance-test-ocata + recheck-pike: + jobs: + - gophercloud-acceptance-test-pike + recheck-queens: + jobs: + - gophercloud-acceptance-test-queens + periodic: + jobs: + - gophercloud-unittest + - gophercloud-acceptance-test diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul/playbooks/gophercloud-acceptance-test/run.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul/playbooks/gophercloud-acceptance-test/run.yaml new file mode 100644 index 000000000000..ce8635a44017 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.zuul/playbooks/gophercloud-acceptance-test/run.yaml @@ -0,0 +1,26 @@ +- hosts: all + become: yes + roles: + - config-golang + - clone-devstack-gate-to-workspace + - role: create-devstack-local-conf + enable_services: + - 'manila' + - 'designate' + - 'zun' + - 'octavia' + - install-devstack + tasks: + - name: Run acceptance tests with gophercloud + shell: + cmd: | + set -e + set -o pipefail + set -x + echo $(export |grep OS_BRANCH) + + go get ./... 
|| true + ./script/acceptancetest -v 2>&1 | tee $TEST_RESULTS_TXT + executable: /bin/bash + chdir: '{{ zuul.project.src_dir }}' + environment: '{{ global_env }}' diff --git a/vendor/github.com/gophercloud/gophercloud/.zuul/playbooks/gophercloud-unittest/run.yaml b/vendor/github.com/gophercloud/gophercloud/.zuul/playbooks/gophercloud-unittest/run.yaml new file mode 100644 index 000000000000..feefa54f2eed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/.zuul/playbooks/gophercloud-unittest/run.yaml @@ -0,0 +1,16 @@ +- hosts: all + become: yes + roles: + - config-golang + tasks: + - name: Run unit tests with gophercloud + shell: + cmd: | + set -e + set -o pipefail + set -x + go get ./... || true + ./script/unittest -v 2>&1 | tee $TEST_RESULTS_TXT + executable: /bin/bash + chdir: '{{ zuul.project.src_dir }}' + environment: '{{ global_env }}' diff --git a/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md b/vendor/github.com/gophercloud/gophercloud/CHANGELOG.md new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/vendor/github.com/gophercloud/gophercloud/LICENSE b/vendor/github.com/gophercloud/gophercloud/LICENSE index 5e86b007c630..fbbbc9e4cbad 100644 --- a/vendor/github.com/gophercloud/gophercloud/LICENSE +++ b/vendor/github.com/gophercloud/gophercloud/LICENSE @@ -9,8 +9,10 @@ License at Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. +specific language governing permissions and limitations under the License. +------ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ diff --git a/vendor/github.com/gophercloud/gophercloud/README.md b/vendor/github.com/gophercloud/gophercloud/README.md new file mode 100644 index 000000000000..ad29041d9bfe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/README.md @@ -0,0 +1,159 @@ +# Gophercloud: an OpenStack SDK for Go +[![Build Status](https://travis-ci.org/gophercloud/gophercloud.svg?branch=master)](https://travis-ci.org/gophercloud/gophercloud) +[![Coverage Status](https://coveralls.io/repos/github/gophercloud/gophercloud/badge.svg?branch=master)](https://coveralls.io/github/gophercloud/gophercloud?branch=master) + +Gophercloud is an OpenStack Go SDK. + +## Useful links + +* [Reference documentation](http://godoc.org/github.com/gophercloud/gophercloud) +* [Effective Go](https://golang.org/doc/effective_go.html) + +## How to install + +Before installing, you need to ensure that your [GOPATH environment variable](https://golang.org/doc/code.html#GOPATH) +is pointing to an appropriate directory where you want to install Gophercloud: + +```bash +mkdir $HOME/go +export GOPATH=$HOME/go +``` + +To protect yourself against changes in your dependencies, we highly recommend choosing a +[dependency management solution](https://github.com/golang/go/wiki/PackageManagementTools) for +your projects, such as [godep](https://github.com/tools/godep). Once this is set up, you can install +Gophercloud as a dependency like so: + +```bash +go get github.com/gophercloud/gophercloud + +# Edit your code to import relevant packages from "github.com/gophercloud/gophercloud" + +godep save ./... 
+``` + +This will install all the source files you need into a `Godeps/_workspace` directory, which is +referenceable from your own source files when you use the `godep go` command. + +## Getting started + +### Credentials + +Because you'll be hitting an API, you will need to retrieve your OpenStack +credentials and either store them as environment variables or in your local Go +files. The first method is recommended because it decouples credential +information from source code, allowing you to push the latter to your version +control system without any security risk. + +You will need to retrieve the following: + +* username +* password +* a valid Keystone identity URL + +For users that have the OpenStack dashboard installed, there's a shortcut. If +you visit the `project/access_and_security` path in Horizon and click on the +"Download OpenStack RC File" button at the top right hand corner, you will +download a bash file that exports all of your access details to environment +variables. To execute the file, run `source admin-openrc.sh` and you will be +prompted for your password. + +### Authentication + +Once you have access to your credentials, you can begin plugging them into +Gophercloud. The next step is authentication, and this is handled by a base +"Provider" struct. To get one, you can either pass in your credentials +explicitly, or tell Gophercloud to use environment variables: + +```go +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +// Option 1: Pass in the values yourself +opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", +} + +// Option 2: Use a utility function to retrieve all your environment variables +opts, err := openstack.AuthOptionsFromEnv() +``` + +Once you have the `opts` variable, you can pass it in and get back a +`ProviderClient` struct: + +```go +provider, err := openstack.AuthenticatedClient(opts) +``` + +The `ProviderClient` is the top-level client that all of your OpenStack services +derive from. The provider contains all of the authentication details that allow +your Go code to access the API - such as the base URL and token ID. + +### Provision a server + +Once we have a base Provider, we inject it as a dependency into each OpenStack +service. In order to work with the Compute API, we need a Compute service +client; which can be created like so: + +```go +client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), +}) +``` + +We then use this `client` for any Compute API operation we want. In our case, +we want to provision a new server - so we invoke the `Create` method and pass +in the flavor ID (hardware specification) and image ID (operating system) we're +interested in: + +```go +import "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + +server, err := servers.Create(client, servers.CreateOpts{ + Name: "My new server!", + FlavorRef: "flavor_id", + ImageRef: "image_id", +}).Extract() +``` + +The above code sample creates a new server with the parameters, and embodies the +new resource in the `server` variable (a +[`servers.Server`](http://godoc.org/github.com/gophercloud/gophercloud) struct). + +## Advanced Usage + +Have a look at the [FAQ](./docs/FAQ.md) for some tips on customizing the way Gophercloud works. + +## Backwards-Compatibility Guarantees + +None. 
Vendor it and write tests covering the parts you use. + +## Contributing + +See the [contributing guide](./.github/CONTRIBUTING.md). + +## Help and feedback + +If you're struggling with something or have spotted a potential bug, feel free +to submit an issue to our [bug tracker](https://github.com/gophercloud/gophercloud/issues). + +## Thank You + +We'd like to extend special thanks and appreciation to the following: + +### OpenLab + + + +OpenLab is providing a full CI environment to test each PR and merge for a variety of OpenStack releases. + +### VEXXHOST + + + +VEXXHOST is providing their services to assist with the development and testing of Gophercloud. diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/README.md b/vendor/github.com/gophercloud/gophercloud/acceptance/README.md new file mode 100644 index 000000000000..ab3569574836 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/README.md @@ -0,0 +1,113 @@ +# Gophercloud Acceptance tests + +The purpose of these acceptance tests is to validate that SDK features meet +the requirements of a contract - to consumers, other parts of the library, and +to a remote API. + +> **Note:** Because every test will be run against a real API endpoint, you +> may incur bandwidth and service charges for all the resource usage. These +> tests *should* remove their remote products automatically. However, there may +> be certain cases where this does not happen; always double-check to make sure +> you have no stragglers left behind. + +### Step 1. Creating a Testing Environment + +Running tests on an existing OpenStack cloud can be risky. Malformed tests, +especially ones which require Admin privileges, can cause damage to the +environment. Additionally, you may incur bandwidth and service charges for +the resources used, as mentioned in the note above. + +Therefore, it is usually best to first practice running acceptance tests in +an isolated test environment. Two options to easily create a testing +environment are [DevStack](https://docs.openstack.org/devstack/latest/) +and [PackStack](https://www.rdoproject.org/install/packstack/). + +The following blog posts detail how to create reusable PackStack environments. +These posts were written with Gophercloud in mind: + +* http://terrarum.net/blog/building-openstack-environments.html +* http://terrarum.net/blog/building-openstack-environments-2.html +* http://terrarum.net/blog/building-openstack-environments-3.html + +### Step 2. Set environment variables + +A lot of tests rely on environment variables for configuration - so you will need +to set them before running the suite. If you're testing against pure OpenStack APIs, +you can download a file that contains all of these variables for you: just visit +the `project/access_and_security` page in your control panel and click the "Download +OpenStack RC File" button at the top right. For all other providers, you will need +to set them manually. 
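+For example, a minimal program that consumes these variables might look like
+the following sketch, which follows the same `AuthOptionsFromEnv` pattern used
+by the acceptance client helpers (the exact variables read are listed in the
+tables below):
+
+```go
+package main
+
+import (
+	"fmt"
+	"os"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack"
+)
+
+func main() {
+	// Reads the OS_* authentication variables (OS_AUTH_URL, OS_USERNAME,
+	// OS_PASSWORD, OS_TENANT_NAME, ...) from the environment.
+	opts, err := openstack.AuthOptionsFromEnv()
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
+	// Authenticate once and reuse the provider for every service client.
+	provider, err := openstack.AuthenticatedClient(opts)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+
+	// OS_REGION_NAME selects the region the service client will talk to.
+	client, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+		os.Exit(1)
+	}
+	_ = client // use the compute client with e.g. the servers package
+}
+```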
+ +#### Authentication + +|Name|Description| +|---|---| +|`OS_USERNAME`|Your API username| +|`OS_PASSWORD`|Your API password| +|`OS_AUTH_URL`|The identity URL you need to authenticate| +|`OS_TENANT_NAME`|Your API tenant name| +|`OS_TENANT_ID`|Your API tenant ID| + +#### General + +|Name|Description| +|---|---| +|`OS_REGION_NAME`|The region you want your resources to reside in| + +#### Compute + +|Name|Description| +|---|---| +|`OS_IMAGE_ID`|The ID of the image your want your server to be based on| +|`OS_FLAVOR_ID`|The ID of the flavor you want your server to be based on| +|`OS_FLAVOR_ID_RESIZE`|The ID of the flavor you want your server to be resized to| +|`OS_POOL_NAME`|The Pool from where to obtain Floating IPs| +|`OS_NETWORK_NAME`|The internal/private network to launch instances on| +|`OS_EXTGW_ID`|The external/public network| + +#### Database + +|Name|Description| +|---|---| +|`OS_DB_DATASTORE_TYPE`|The Datastore type to use. Example: `mariadb`| +|`OS_DB_DATASTORE_VERSION`|The Datastore version to use. Example: `mariadb-10`| + +#### Shared file systems +|Name|Description| +|---|---| +|`OS_SHARE_NETWORK_ID`| The share network ID to use when creating shares| + +### 3. Run the test suite + +From the root directory, run: + +``` +./script/acceptancetest +``` + +Alternatively, add the following to your `.bashrc`: + +```bash +gophercloudtest() { + if [[ -n $1 ]] && [[ -n $2 ]]; then + pushd $GOPATH/src/github.com/gophercloud/gophercloud + go test -v -tags "fixtures acceptance" -run "$1" github.com/gophercloud/gophercloud/acceptance/openstack/$2 | tee ~/gophercloud.log + popd +fi +} +``` + +Then run either groups or individual tests by doing: + +```shell +$ gophercloudtest TestFlavorsList compute/v2 +$ gophercloudtest TestFlavors compute/v2 +$ gophercloudtest Test compute/v2 +``` + +### 4. Notes + +#### Compute Tests + +* In order to run the `TestBootFromVolumeMultiEphemeral` test, a flavor with ephemeral disk space must be used. +* The `TestDefSecRules` tests require a compatible network driver and admin privileges. diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/clients/clients.go b/vendor/github.com/gophercloud/gophercloud/acceptance/clients/clients.go new file mode 100644 index 000000000000..3f446d7e6633 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/clients/clients.go @@ -0,0 +1,630 @@ +// Package clients contains functions for creating OpenStack service clients +// for use in acceptance tests. It also manages the required environment +// variables to run the tests. +package clients + +import ( + "fmt" + "net/http" + "os" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/blockstorage/noauth" +) + +// AcceptanceTestChoices contains image and flavor selections for use by the acceptance tests. +type AcceptanceTestChoices struct { + // ImageID contains the ID of a valid image. + ImageID string + + // FlavorID contains the ID of a valid flavor. + FlavorID string + + // FlavorIDResize contains the ID of a different flavor available on the same OpenStack installation, that is distinct + // from FlavorID. + FlavorIDResize string + + // FloatingIPPool contains the name of the pool from where to obtain floating IPs. + FloatingIPPoolName string + + // MagnumKeypair contains the ID of a valid key pair. + MagnumKeypair string + + // MagnumImageID contains the ID of a valid magnum image. 
+ MagnumImageID string + + // NetworkName is the name of a network to launch the instance on. + NetworkName string + + // ExternalNetworkID is the network ID of the external network. + ExternalNetworkID string + + // ShareNetworkID is the Manila Share network ID + ShareNetworkID string + + // DBDatastoreType is the datastore type for DB tests. + DBDatastoreType string + + // DBDatastoreTypeID is the datastore type version for DB tests. + DBDatastoreVersion string +} + +// AcceptanceTestChoicesFromEnv populates a ComputeChoices struct from environment variables. +// If any required state is missing, an `error` will be returned that enumerates the missing properties. +func AcceptanceTestChoicesFromEnv() (*AcceptanceTestChoices, error) { + imageID := os.Getenv("OS_IMAGE_ID") + flavorID := os.Getenv("OS_FLAVOR_ID") + flavorIDResize := os.Getenv("OS_FLAVOR_ID_RESIZE") + magnumImageID := os.Getenv("OS_MAGNUM_IMAGE_ID") + magnumKeypair := os.Getenv("OS_MAGNUM_KEYPAIR") + networkName := os.Getenv("OS_NETWORK_NAME") + floatingIPPoolName := os.Getenv("OS_POOL_NAME") + externalNetworkID := os.Getenv("OS_EXTGW_ID") + shareNetworkID := os.Getenv("OS_SHARE_NETWORK_ID") + dbDatastoreType := os.Getenv("OS_DB_DATASTORE_TYPE") + dbDatastoreVersion := os.Getenv("OS_DB_DATASTORE_VERSION") + + missing := make([]string, 0, 3) + if imageID == "" { + missing = append(missing, "OS_IMAGE_ID") + } + if flavorID == "" { + missing = append(missing, "OS_FLAVOR_ID") + } + if flavorIDResize == "" { + missing = append(missing, "OS_FLAVOR_ID_RESIZE") + } + if floatingIPPoolName == "" { + missing = append(missing, "OS_POOL_NAME") + } + if externalNetworkID == "" { + missing = append(missing, "OS_EXTGW_ID") + } + if networkName == "" { + networkName = "private" + } + if shareNetworkID == "" { + missing = append(missing, "OS_SHARE_NETWORK_ID") + } + notDistinct := "" + if flavorID == flavorIDResize { + notDistinct = "OS_FLAVOR_ID and OS_FLAVOR_ID_RESIZE must be distinct." + } + + if len(missing) > 0 || notDistinct != "" { + text := "You're missing some important setup:\n" + if len(missing) > 0 { + text += " * These environment variables must be provided: " + strings.Join(missing, ", ") + "\n" + } + if notDistinct != "" { + text += " * " + notDistinct + "\n" + } + + return nil, fmt.Errorf(text) + } + + return &AcceptanceTestChoices{ + ImageID: imageID, + FlavorID: flavorID, + FlavorIDResize: flavorIDResize, + FloatingIPPoolName: floatingIPPoolName, + MagnumImageID: magnumImageID, + MagnumKeypair: magnumKeypair, + NetworkName: networkName, + ExternalNetworkID: externalNetworkID, + ShareNetworkID: shareNetworkID, + DBDatastoreType: dbDatastoreType, + DBDatastoreVersion: dbDatastoreVersion, + }, nil +} + +// NewBlockStorageV1Client returns a *ServiceClient for making calls +// to the OpenStack Block Storage v1 API. An error will be returned +// if authentication or client creation was not possible. +func NewBlockStorageV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewBlockStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewBlockStorageV2Client returns a *ServiceClient for making calls +// to the OpenStack Block Storage v2 API. An error will be returned +// if authentication or client creation was not possible. 
+func NewBlockStorageV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewBlockStorageV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewBlockStorageV3Client returns a *ServiceClient for making calls +// to the OpenStack Block Storage v3 API. An error will be returned +// if authentication or client creation was not possible. +func NewBlockStorageV3Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewBlockStorageV3(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewBlockStorageV2NoAuthClient returns a noauth *ServiceClient for +// making calls to the OpenStack Block Storage v2 API. An error will be +// returned if client creation was not possible. +func NewBlockStorageV2NoAuthClient() (*gophercloud.ServiceClient, error) { + client, err := noauth.NewClient(gophercloud.AuthOptions{ + Username: os.Getenv("OS_USERNAME"), + TenantName: os.Getenv("OS_TENANT_NAME"), + }) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return noauth.NewBlockStorageNoAuth(client, noauth.EndpointOpts{ + CinderEndpoint: os.Getenv("CINDER_ENDPOINT"), + }) +} + +// NewBlockStorageV3NoAuthClient returns a noauth *ServiceClient for +// making calls to the OpenStack Block Storage v2 API. An error will be +// returned if client creation was not possible. +func NewBlockStorageV3NoAuthClient() (*gophercloud.ServiceClient, error) { + client, err := noauth.NewClient(gophercloud.AuthOptions{ + Username: os.Getenv("OS_USERNAME"), + TenantName: os.Getenv("OS_TENANT_NAME"), + }) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return noauth.NewBlockStorageNoAuth(client, noauth.EndpointOpts{ + CinderEndpoint: os.Getenv("CINDER_ENDPOINT"), + }) +} + +// NewComputeV2Client returns a *ServiceClient for making calls +// to the OpenStack Compute v2 API. An error will be returned +// if authentication or client creation was not possible. +func NewComputeV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewComputeV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewDBV1Client returns a *ServiceClient for making calls +// to the OpenStack Database v1 API. An error will be returned +// if authentication or client creation was not possible. +func NewDBV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewDBV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewDNSV2Client returns a *ServiceClient for making calls +// to the OpenStack Compute v2 API. 
An error will be returned +// if authentication or client creation was not possible. +func NewDNSV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewDNSV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewIdentityV2Client returns a *ServiceClient for making calls +// to the OpenStack Identity v2 API. An error will be returned +// if authentication or client creation was not possible. +func NewIdentityV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewIdentityV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewIdentityV2AdminClient returns a *ServiceClient for making calls +// to the Admin Endpoint of the OpenStack Identity v2 API. An error +// will be returned if authentication or client creation was not possible. +func NewIdentityV2AdminClient() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewIdentityV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + Availability: gophercloud.AvailabilityAdmin, + }) +} + +// NewIdentityV2UnauthenticatedClient returns an unauthenticated *ServiceClient +// for the OpenStack Identity v2 API. An error will be returned if +// authentication or client creation was not possible. +func NewIdentityV2UnauthenticatedClient() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewIdentityV2(client, gophercloud.EndpointOpts{}) +} + +// NewIdentityV3Client returns a *ServiceClient for making calls +// to the OpenStack Identity v3 API. An error will be returned +// if authentication or client creation was not possible. +func NewIdentityV3Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewIdentityV3(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewIdentityV3UnauthenticatedClient returns an unauthenticated *ServiceClient +// for the OpenStack Identity v3 API. An error will be returned if +// authentication or client creation was not possible. 
+func NewIdentityV3UnauthenticatedClient() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewIdentityV3(client, gophercloud.EndpointOpts{}) +} + +// NewImageServiceV2Client returns a *ServiceClient for making calls to the +// OpenStack Image v2 API. An error will be returned if authentication or +// client creation was not possible. +func NewImageServiceV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewImageServiceV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewNetworkV2Client returns a *ServiceClient for making calls to the +// OpenStack Networking v2 API. An error will be returned if authentication +// or client creation was not possible. +func NewNetworkV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewObjectStorageV1Client returns a *ServiceClient for making calls to the +// OpenStack Object Storage v1 API. An error will be returned if authentication +// or client creation was not possible. +func NewObjectStorageV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewSharedFileSystemV2Client returns a *ServiceClient for making calls +// to the OpenStack Shared File System v2 API. An error will be returned +// if authentication or client creation was not possible. +func NewSharedFileSystemV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewSharedFileSystemV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewLoadBalancerV2Client returns a *ServiceClient for making calls to the +// OpenStack Octavia v2 API. An error will be returned if authentication +// or client creation was not possible. +func NewLoadBalancerV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewLoadBalancerV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewClusteringV1Client returns a *ServiceClient for making calls +// to the OpenStack Clustering v1 API. 
An error will be returned +// if authentication or client creation was not possible. +func NewClusteringV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewClusteringV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewMessagingV2Client returns a *ServiceClient for making calls +// to the OpenStack Messaging (Zaqar) v2 API. An error will be returned +// if authentication or client creation was not possible. +func NewMessagingV2Client(clientID string) (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewMessagingV2(client, clientID, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewContainerV1Client returns a *ServiceClient for making calls +// to the OpenStack Container V1 API. An error will be returned +// if authentication or client creation was not possible. +func NewContainerV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + return openstack.NewContainerV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewKeyManagerV1Client returns a *ServiceClient for making calls +// to the OpenStack Key Manager (Barbican) v1 API. An error will be +// returned if authentication or client creation was not possible. +func NewKeyManagerV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewKeyManagerV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// configureDebug will configure the provider client to print the API +// requests and responses if OS_DEBUG is enabled. +func configureDebug(client *gophercloud.ProviderClient) *gophercloud.ProviderClient { + if os.Getenv("OS_DEBUG") != "" { + client.HTTPClient = http.Client{ + Transport: &LogRoundTripper{ + Rt: &http.Transport{}, + }, + } + } + + return client +} + +// NewContainerInfraV1Client returns a *ServiceClient for making calls +// to the OpenStack Container Infra Management v1 API. An error will be returned +// if authentication or client creation was not possible. +func NewContainerInfraV1Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewContainerInfraV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} + +// NewWorkflowV2Client returns a *ServiceClient for making calls +// to the OpenStack Workflow v2 API (Mistral). An error will be returned if +// authentication or client creation failed. 
+func NewWorkflowV2Client() (*gophercloud.ServiceClient, error) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + return nil, err + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + return nil, err + } + + client = configureDebug(client) + + return openstack.NewWorkflowV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/clients/conditions.go b/vendor/github.com/gophercloud/gophercloud/acceptance/clients/conditions.go new file mode 100644 index 000000000000..bc8b306d084f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/clients/conditions.go @@ -0,0 +1,75 @@ +package clients + +import ( + "os" + "testing" +) + +// RequireAdmin will restrict a test to only be run by admin users. +func RequireAdmin(t *testing.T) { + if os.Getenv("OS_USERNAME") != "admin" { + t.Skip("must be admin to run this test") + } +} + +// RequireNonAdmin will restrict a test to only be run by non-admin users. +func RequireNonAdmin(t *testing.T) { + if os.Getenv("OS_USERNAME") == "admin" { + t.Skip("must be a non-admin to run this test") + } +} + +// RequireDNS will restrict a test to only be run in environments +// that support DNSaaS. +func RequireDNS(t *testing.T) { + if os.Getenv("OS_DNS_ENVIRONMENT") == "" { + t.Skip("this test requires DNSaaS") + } +} + +// RequireGuestAgent will restrict a test to only be run in +// environments that support the QEMU guest agent. +func RequireGuestAgent(t *testing.T) { + if os.Getenv("OS_GUEST_AGENT") == "" { + t.Skip("this test requires support for qemu guest agent and to set OS_GUEST_AGENT to 1") + } +} + +// RequireIdentityV2 will restrict a test to only be run in +// environments that support the Identity V2 API. +func RequireIdentityV2(t *testing.T) { + if os.Getenv("OS_IDENTITY_API_VERSION") != "2.0" { + t.Skip("this test requires support for the identity v2 API") + } +} + +// RequireLiveMigration will restrict a test to only be run in +// environments that support live migration. +func RequireLiveMigration(t *testing.T) { + if os.Getenv("OS_LIVE_MIGRATE") == "" { + t.Skip("this test requires support for live migration and to set OS_LIVE_MIGRATE to 1") + } +} + +// RequireLong will ensure long-running tests can run. +func RequireLong(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode") + } +} + +// RequireNovaNetwork will restrict a test to only be run in +// environments that support nova-network. +func RequireNovaNetwork(t *testing.T) { + if os.Getenv("OS_NOVANET") == "" { + t.Skip("this test requires nova-network and to set OS_NOVANET to 1") + } +} + +// SkipRelease will have the test be skipped on a certain +// release. Releases are named such as 'stable/mitaka', master, etc. 
+func SkipRelease(t *testing.T, release string) { + if os.Getenv("OS_BRANCH") == release { + t.Skipf("this is not supported in %s", release) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/clients/http.go b/vendor/github.com/gophercloud/gophercloud/acceptance/clients/http.go new file mode 100644 index 000000000000..ed09a1ac52b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/clients/http.go @@ -0,0 +1,168 @@ +package clients + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "log" + "net/http" + "sort" + "strings" +) + +// List of headers that need to be redacted +var REDACT_HEADERS = []string{"x-auth-token", "x-auth-key", "x-service-token", + "x-storage-token", "x-account-meta-temp-url-key", "x-account-meta-temp-url-key-2", + "x-container-meta-temp-url-key", "x-container-meta-temp-url-key-2", "set-cookie", + "x-subject-token"} + +// LogRoundTripper satisfies the http.RoundTripper interface and is used to +// customize the default http client RoundTripper to allow logging. +type LogRoundTripper struct { + Rt http.RoundTripper +} + +// RoundTrip performs a round-trip HTTP request and logs relevant information +// about it. +func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + defer func() { + if request.Body != nil { + request.Body.Close() + } + }() + + var err error + + log.Printf("[DEBUG] OpenStack Request URL: %s %s", request.Method, request.URL) + log.Printf("[DEBUG] OpenStack request Headers:\n%s", formatHeaders(request.Header)) + + if request.Body != nil { + request.Body, err = lrt.logRequest(request.Body, request.Header.Get("Content-Type")) + if err != nil { + return nil, err + } + } + + response, err := lrt.Rt.RoundTrip(request) + if response == nil { + return nil, err + } + + log.Printf("[DEBUG] OpenStack Response Code: %d", response.StatusCode) + log.Printf("[DEBUG] OpenStack Response Headers:\n%s", formatHeaders(response.Header)) + + response.Body, err = lrt.logResponse(response.Body, response.Header.Get("Content-Type")) + + return response, err +} + +// logRequest will log the HTTP Request details. +// If the body is JSON, it will attempt to be pretty-formatted. +func (lrt *LogRoundTripper) logRequest(original io.ReadCloser, contentType string) (io.ReadCloser, error) { + defer original.Close() + + var bs bytes.Buffer + _, err := io.Copy(&bs, original) + if err != nil { + return nil, err + } + + // Handle request contentType + if strings.HasPrefix(contentType, "application/json") { + debugInfo := lrt.formatJSON(bs.Bytes()) + log.Printf("[DEBUG] OpenStack Request Body: %s", debugInfo) + } + + return ioutil.NopCloser(strings.NewReader(bs.String())), nil +} + +// logResponse will log the HTTP Response details. +// If the body is JSON, it will attempt to be pretty-formatted. +func (lrt *LogRoundTripper) logResponse(original io.ReadCloser, contentType string) (io.ReadCloser, error) { + if strings.HasPrefix(contentType, "application/json") { + var bs bytes.Buffer + defer original.Close() + _, err := io.Copy(&bs, original) + if err != nil { + return nil, err + } + debugInfo := lrt.formatJSON(bs.Bytes()) + if debugInfo != "" { + log.Printf("[DEBUG] OpenStack Response Body: %s", debugInfo) + } + return ioutil.NopCloser(strings.NewReader(bs.String())), nil + } + + log.Printf("[DEBUG] Not logging because OpenStack response body isn't JSON") + return original, nil +} + +// formatJSON will try to pretty-format a JSON body. 
+// It will also mask known fields which contain sensitive information. +func (lrt *LogRoundTripper) formatJSON(raw []byte) string { + var data map[string]interface{} + + err := json.Unmarshal(raw, &data) + if err != nil { + log.Printf("[DEBUG] Unable to parse OpenStack JSON: %s", err) + return string(raw) + } + + // Mask known password fields + if v, ok := data["auth"].(map[string]interface{}); ok { + if v, ok := v["identity"].(map[string]interface{}); ok { + if v, ok := v["password"].(map[string]interface{}); ok { + if v, ok := v["user"].(map[string]interface{}); ok { + v["password"] = "***" + } + } + } + } + + // Ignore the catalog + if v, ok := data["token"].(map[string]interface{}); ok { + if _, ok := v["catalog"]; ok { + return "" + } + } + + pretty, err := json.MarshalIndent(data, "", " ") + if err != nil { + log.Printf("[DEBUG] Unable to re-marshal OpenStack JSON: %s", err) + return string(raw) + } + + return string(pretty) +} + +// redactHeaders processes a headers object, returning a redacted list +func redactHeaders(headers http.Header) (processedHeaders []string) { + for name, header := range headers { + var sensitive bool + + for _, redact_header := range REDACT_HEADERS { + if strings.ToLower(name) == strings.ToLower(redact_header) { + sensitive = true + } + } + + for _, v := range header { + if sensitive { + processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, "***")) + } else { + processedHeaders = append(processedHeaders, fmt.Sprintf("%v: %v", name, v)) + } + } + } + return +} + +// formatHeaders processes a headers object plus a deliminator, returning a string +func formatHeaders(headers http.Header) string { + redactedHeaders := redactHeaders(headers) + sort.Strings(redactedHeaders) + + return strings.Join(redactedHeaders, "\n") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/extensions.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/extensions.go new file mode 100644 index 000000000000..2856b8b655a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/extensions.go @@ -0,0 +1,173 @@ +// Package extensions contains common functions for creating block storage +// resources that are extensions of the block storage API. See the `*_test.go` +// files for example usages. +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +// CreateUploadImage will upload volume it as volume-baked image. 
An name of new image or err will be +// returned +func CreateUploadImage(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) (volumeactions.VolumeImage, error) { + if testing.Short() { + t.Skip("Skipping test that requires volume-backed image uploading in short mode.") + } + + imageName := tools.RandomString("ACPTTEST", 16) + uploadImageOpts := volumeactions.UploadImageOpts{ + ImageName: imageName, + Force: true, + } + + volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract() + if err != nil { + return volumeImage, err + } + + t.Logf("Uploading volume %s as volume-backed image %s", volume.ID, imageName) + + if err := volumes.WaitForStatus(client, volume.ID, "available", 60); err != nil { + return volumeImage, err + } + + t.Logf("Uploaded volume %s as volume-backed image %s", volume.ID, imageName) + + return volumeImage, nil + +} + +// DeleteUploadedImage deletes uploaded image. An error will be returned +// if the deletion request failed. +func DeleteUploadedImage(t *testing.T, client *gophercloud.ServiceClient, imageName string) error { + if testing.Short() { + t.Skip("Skipping test that requires volume-backed image removing in short mode.") + } + + t.Logf("Getting image id for image name %s", imageName) + + imageID, err := images.IDFromName(client, imageName) + if err != nil { + return err + } + + t.Logf("Removing image %s", imageID) + + err = images.Delete(client, imageID).ExtractErr() + if err != nil { + return err + } + + return nil +} + +// CreateVolumeAttach will attach a volume to an instance. An error will be +// returned if the attachment failed. +func CreateVolumeAttach(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume, server *servers.Server) error { + if testing.Short() { + t.Skip("Skipping test that requires volume attachment in short mode.") + } + + attachOpts := volumeactions.AttachOpts{ + MountPoint: "/mnt", + Mode: "rw", + InstanceUUID: server.ID, + } + + t.Logf("Attempting to attach volume %s to server %s", volume.ID, server.ID) + + if err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr(); err != nil { + return err + } + + if err := volumes.WaitForStatus(client, volume.ID, "in-use", 60); err != nil { + return err + } + + t.Logf("Attached volume %s to server %s", volume.ID, server.ID) + + return nil +} + +// CreateVolumeReserve creates a volume reservation. An error will be returned +// if the reservation failed. +func CreateVolumeReserve(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) error { + if testing.Short() { + t.Skip("Skipping test that requires volume reservation in short mode.") + } + + t.Logf("Attempting to reserve volume %s", volume.ID) + + if err := volumeactions.Reserve(client, volume.ID).ExtractErr(); err != nil { + return err + } + + t.Logf("Reserved volume %s", volume.ID) + + return nil +} + +// DeleteVolumeAttach will detach a volume from an instance. A fatal error will +// occur if the snapshot failed to be deleted. This works best when used as a +// deferred function. 
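+// Because this helper reads volume.Attachments[0].AttachmentID, callers are
+// expected to pass a volume that was re-fetched after the attach completed.
+// A minimal sketch, assuming client, volume, and server come from the other
+// acceptance helpers in this diff (the re-fetch mirrors the volumeactions
+// acceptance test):
+//
+//	if err := CreateVolumeAttach(t, client, volume, server); err != nil {
+//		t.Fatalf("Unable to attach volume: %v", err)
+//	}
+//	attached, err := volumes.Get(client, volume.ID).Extract()
+//	if err != nil {
+//		t.Fatalf("Unable to refresh volume: %v", err)
+//	}
+//	DeleteVolumeAttach(t, client, attached)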
+func DeleteVolumeAttach(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) { + t.Logf("Attepting to detach volume volume: %s", volume.ID) + + detachOpts := volumeactions.DetachOpts{ + AttachmentID: volume.Attachments[0].AttachmentID, + } + + if err := volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr(); err != nil { + t.Fatalf("Unable to detach volume %s: %v", volume.ID, err) + } + + if err := volumes.WaitForStatus(client, volume.ID, "available", 60); err != nil { + t.Fatalf("Volume %s failed to become unavailable in 60 seconds: %v", volume.ID, err) + } + + t.Logf("Detached volume: %s", volume.ID) +} + +// DeleteVolumeReserve deletes a volume reservation. A fatal error will occur +// if the deletion request failed. This works best when used as a deferred +// function. +func DeleteVolumeReserve(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) { + if testing.Short() { + t.Skip("Skipping test that requires volume reservation in short mode.") + } + + t.Logf("Attempting to unreserve volume %s", volume.ID) + + if err := volumeactions.Unreserve(client, volume.ID).ExtractErr(); err != nil { + t.Fatalf("Unable to unreserve volume %s: %v", volume.ID, err) + } + + t.Logf("Unreserved volume %s", volume.ID) +} + +// ExtendVolumeSize will extend the size of a volume. +func ExtendVolumeSize(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) error { + t.Logf("Attempting to extend the size of volume %s", volume.ID) + + extendOpts := volumeactions.ExtendSizeOpts{ + NewSize: 2, + } + + err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr() + if err != nil { + return err + } + + if err := volumes.WaitForStatus(client, volume.ID, "available", 60); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/pkg.go new file mode 100644 index 000000000000..f18039dcb176 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/pkg.go @@ -0,0 +1,3 @@ +// The extensions package contains acceptance tests for the Openstack Cinder extensions service. 
+ +package extensions diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/schedulerstats_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/schedulerstats_test.go new file mode 100644 index 000000000000..76093f4c126f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/schedulerstats_test.go @@ -0,0 +1,36 @@ +// +build acceptance blockstorage + +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats" +) + +func TestSchedulerStatsList(t *testing.T) { + blockClient, err := clients.NewBlockStorageV2Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + listOpts := schedulerstats.ListOpts{ + Detail: true, + } + + allPages, err := schedulerstats.List(blockClient, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to query schedulerstats: %v", err) + } + + allStats, err := schedulerstats.ExtractStoragePools(allPages) + if err != nil { + t.Fatalf("Unable to extract pools: %v", err) + } + + for _, stat := range allStats { + tools.PrintResource(t, stat) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/services_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/services_test.go new file mode 100644 index 000000000000..b65f586ec8d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/services_test.go @@ -0,0 +1,32 @@ +// +build acceptance blockstorage + +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services" +) + +func TestServicesList(t *testing.T) { + blockClient, err := clients.NewBlockStorageV3Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + allPages, err := services.List(blockClient, services.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to list services: %v", err) + } + + allServices, err := services.ExtractServices(allPages) + if err != nil { + t.Fatalf("Unable to extract services") + } + + for _, service := range allServices { + tools.PrintResource(t, service) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/volumeactions_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/volumeactions_test.go new file mode 100644 index 000000000000..0c885167655f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/extensions/volumeactions_test.go @@ -0,0 +1,170 @@ +// +build acceptance blockstorage + +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + + blockstorage "github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2" + compute "github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2" +) + +func TestVolumeActionsUploadImageDestroy(t *testing.T) { + blockClient, err := 
clients.NewBlockStorageV2Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + computeClient, err := clients.NewComputeV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + volume, err := blockstorage.CreateVolume(t, blockClient) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer blockstorage.DeleteVolume(t, blockClient, volume) + + volumeImage, err := CreateUploadImage(t, blockClient, volume) + if err != nil { + t.Fatalf("Unable to upload volume-backed image: %v", err) + } + + tools.PrintResource(t, volumeImage) + + err = DeleteUploadedImage(t, computeClient, volumeImage.ImageName) + if err != nil { + t.Fatalf("Unable to delete volume-backed image: %v", err) + } +} + +func TestVolumeActionsAttachCreateDestroy(t *testing.T) { + blockClient, err := clients.NewBlockStorageV2Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + computeClient, err := clients.NewComputeV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := compute.CreateServer(t, computeClient) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer compute.DeleteServer(t, computeClient, server) + + volume, err := blockstorage.CreateVolume(t, blockClient) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer blockstorage.DeleteVolume(t, blockClient, volume) + + err = CreateVolumeAttach(t, blockClient, volume, server) + if err != nil { + t.Fatalf("Unable to attach volume: %v", err) + } + + newVolume, err := volumes.Get(blockClient, volume.ID).Extract() + if err != nil { + t.Fatal("Unable to get updated volume information: %v", err) + } + + DeleteVolumeAttach(t, blockClient, newVolume) +} + +func TestVolumeActionsReserveUnreserve(t *testing.T) { + client, err := clients.NewBlockStorageV2Client() + if err != nil { + t.Fatalf("Unable to create blockstorage client: %v", err) + } + + volume, err := blockstorage.CreateVolume(t, client) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer blockstorage.DeleteVolume(t, client, volume) + + err = CreateVolumeReserve(t, client, volume) + if err != nil { + t.Fatalf("Unable to create volume reserve: %v", err) + } + defer DeleteVolumeReserve(t, client, volume) +} + +func TestVolumeActionsExtendSize(t *testing.T) { + blockClient, err := clients.NewBlockStorageV2Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + volume, err := blockstorage.CreateVolume(t, blockClient) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer blockstorage.DeleteVolume(t, blockClient, volume) + + tools.PrintResource(t, volume) + + err = ExtendVolumeSize(t, blockClient, volume) + if err != nil { + t.Fatalf("Unable to resize volume: %v", err) + } + + newVolume, err := volumes.Get(blockClient, volume.ID).Extract() + if err != nil { + t.Fatal("Unable to get updated volume information: %v", err) + } + + tools.PrintResource(t, newVolume) +} + +// Note(jtopjian): I plan to work on this at some point, but it requires +// setting up a server with iscsi utils. 
+/* +func TestVolumeConns(t *testing.T) { + client, err := newClient() + th.AssertNoErr(t, err) + + t.Logf("Creating volume") + cv, err := volumes.Create(client, &volumes.CreateOpts{ + Size: 1, + Name: "blockv2-volume", + }).Extract() + th.AssertNoErr(t, err) + + defer func() { + err = volumes.WaitForStatus(client, cv.ID, "available", 60) + th.AssertNoErr(t, err) + + t.Logf("Deleting volume") + err = volumes.Delete(client, cv.ID).ExtractErr() + th.AssertNoErr(t, err) + }() + + err = volumes.WaitForStatus(client, cv.ID, "available", 60) + th.AssertNoErr(t, err) + + connOpts := &volumeactions.ConnectorOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: false, + Platform: "x86_64", + OSType: "linux2", + } + + t.Logf("Initializing connection") + _, err = volumeactions.InitializeConnection(client, cv.ID, connOpts).Extract() + th.AssertNoErr(t, err) + + t.Logf("Terminating connection") + err = volumeactions.TerminateConnection(client, cv.ID, connOpts).ExtractErr() + th.AssertNoErr(t, err) +} +*/ diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/blockstorage.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/blockstorage.go new file mode 100644 index 000000000000..9e8aeab9dc3b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/blockstorage.go @@ -0,0 +1,142 @@ +// Package noauth contains common functions for creating block storage based +// resources for use in acceptance tests. See the `*_test.go` files for +// example usages. +package noauth + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" +) + +// CreateVolume will create a volume with a random name and size of 1GB. An +// error will be returned if the volume was unable to be created. +func CreateVolume(t *testing.T, client *gophercloud.ServiceClient) (*volumes.Volume, error) { + if testing.Short() { + t.Skip("Skipping test that requires volume creation in short mode.") + } + + volumeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume: %s", volumeName) + + createOpts := volumes.CreateOpts{ + Size: 1, + Name: volumeName, + } + + volume, err := volumes.Create(client, createOpts).Extract() + if err != nil { + return volume, err + } + + err = volumes.WaitForStatus(client, volume.ID, "available", 60) + if err != nil { + return volume, err + } + + return volume, nil +} + +// CreateVolumeFromImage will create a volume from with a random name and size of +// 1GB. An error will be returned if the volume was unable to be created. 
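+// The image ID comes from clients.AcceptanceTestChoicesFromEnv, so the test
+// environment has to advertise a bootable image (commonly via OS_IMAGE_ID).
+// A minimal usage sketch, where client is assumed to be a no-auth block
+// storage v2 client from the acceptance clients package:
+//
+//	volume, err := CreateVolumeFromImage(t, client)
+//	if err != nil {
+//		t.Fatalf("Unable to create volume from image: %v", err)
+//	}
+//	defer DeleteVolume(t, client, volume)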
+func CreateVolumeFromImage(t *testing.T, client *gophercloud.ServiceClient) (*volumes.Volume, error) { + if testing.Short() { + t.Skip("Skipping test that requires volume creation in short mode.") + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + volumeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume: %s", volumeName) + + createOpts := volumes.CreateOpts{ + Size: 1, + Name: volumeName, + ImageID: choices.ImageID, + } + + volume, err := volumes.Create(client, createOpts).Extract() + if err != nil { + return volume, err + } + + err = volumes.WaitForStatus(client, volume.ID, "available", 60) + if err != nil { + return volume, err + } + + return volume, nil +} + +// DeleteVolume will delete a volume. A fatal error will occur if the volume +// failed to be deleted. This works best when used as a deferred function. +func DeleteVolume(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) { + err := volumes.Delete(client, volume.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete volume %s: %v", volume.ID, err) + } + + t.Logf("Deleted volume: %s", volume.ID) +} + +// CreateSnapshot will create a snapshot of the specified volume. +// Snapshot will be assigned a random name and description. +func CreateSnapshot(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) (*snapshots.Snapshot, error) { + if testing.Short() { + t.Skip("Skipping test that requires snapshot creation in short mode.") + } + + snapshotName := tools.RandomString("ACPTTEST", 16) + snapshotDescription := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create snapshot: %s", snapshotName) + + createOpts := snapshots.CreateOpts{ + VolumeID: volume.ID, + Name: snapshotName, + Description: snapshotDescription, + } + + snapshot, err := snapshots.Create(client, createOpts).Extract() + if err != nil { + return snapshot, err + } + + err = snapshots.WaitForStatus(client, snapshot.ID, "available", 60) + if err != nil { + return snapshot, err + } + + return snapshot, nil +} + +// DeleteSnapshot will delete a snapshot. A fatal error will occur if the +// snapshot failed to be deleted. +func DeleteSnapshot(t *testing.T, client *gophercloud.ServiceClient, snapshot *snapshots.Snapshot) { + err := snapshots.Delete(client, snapshot.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete snapshot %s: %+v", snapshot.ID, err) + } + + // Volumes can't be deleted until their snapshots have been, + // so block up to 120 seconds for the snapshot to delete. + err = gophercloud.WaitFor(120, func() (bool, error) { + _, err := snapshots.Get(client, snapshot.ID).Extract() + if err != nil { + return true, nil + } + + return false, nil + }) + if err != nil { + t.Fatalf("Error waiting for snapshot to delete: %v", err) + } + + t.Logf("Deleted snapshot: %s", snapshot.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/pkg.go new file mode 100644 index 000000000000..5d4f920cbf83 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/pkg.go @@ -0,0 +1,3 @@ +// The noauth package contains acceptance tests for the Openstack Cinder standalone service. 
+ +package noauth diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/snapshots_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/snapshots_test.go new file mode 100644 index 000000000000..f287c3e5f285 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/snapshots_test.go @@ -0,0 +1,58 @@ +// +build acceptance blockstorage + +package noauth + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots" +) + +func TestSnapshotsList(t *testing.T) { + client, err := clients.NewBlockStorageV2NoAuthClient() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + allPages, err := snapshots.List(client, snapshots.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve snapshots: %v", err) + } + + allSnapshots, err := snapshots.ExtractSnapshots(allPages) + if err != nil { + t.Fatalf("Unable to extract snapshots: %v", err) + } + + for _, snapshot := range allSnapshots { + tools.PrintResource(t, snapshot) + } +} + +func TestSnapshotsCreateDelete(t *testing.T) { + client, err := clients.NewBlockStorageV2NoAuthClient() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + volume, err := CreateVolume(t, client) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer DeleteVolume(t, client, volume) + + snapshot, err := CreateSnapshot(t, client, volume) + if err != nil { + t.Fatalf("Unable to create snapshot: %v", err) + } + defer DeleteSnapshot(t, client, snapshot) + + newSnapshot, err := snapshots.Get(client, snapshot.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve snapshot: %v", err) + } + + tools.PrintResource(t, newSnapshot) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/volumes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/volumes_test.go new file mode 100644 index 000000000000..4e10344cf684 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/noauth/volumes_test.go @@ -0,0 +1,52 @@ +// +build acceptance blockstorage + +package noauth + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" +) + +func TestVolumesList(t *testing.T) { + client, err := clients.NewBlockStorageV2NoAuthClient() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + allPages, err := volumes.List(client, volumes.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve volumes: %v", err) + } + + allVolumes, err := volumes.ExtractVolumes(allPages) + if err != nil { + t.Fatalf("Unable to extract volumes: %v", err) + } + + for _, volume := range allVolumes { + tools.PrintResource(t, volume) + } +} + +func TestVolumesCreateDestroy(t *testing.T) { + client, err := clients.NewBlockStorageV2NoAuthClient() + if err != nil { + t.Fatalf("Unable to create blockstorage client: %v", err) + } + + volume, err := CreateVolume(t, client) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer DeleteVolume(t, client, volume) + + newVolume, err := volumes.Get(client, 
volume.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve volume: %v", err) + } + + tools.PrintResource(t, newVolume) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/blockstorage.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/blockstorage.go new file mode 100644 index 000000000000..41f24e1ab294 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/blockstorage.go @@ -0,0 +1,142 @@ +// Package v1 contains common functions for creating block storage based +// resources for use in acceptance tests. See the `*_test.go` files for +// example usages. +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes" +) + +// CreateSnapshot will create a volume snapshot based off of a given volume and +// with a random name. An error will be returned if the snapshot failed to be +// created. +func CreateSnapshot(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) (*snapshots.Snapshot, error) { + if testing.Short() { + t.Skip("Skipping test that requires snapshot creation in short mode.") + } + + snapshotName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create snapshot %s based on volume %s", snapshotName, volume.ID) + + createOpts := snapshots.CreateOpts{ + Name: snapshotName, + VolumeID: volume.ID, + } + + snapshot, err := snapshots.Create(client, createOpts).Extract() + if err != nil { + return snapshot, err + } + + err = snapshots.WaitForStatus(client, snapshot.ID, "available", 60) + if err != nil { + return snapshot, err + } + + return snapshot, nil +} + +// CreateVolume will create a volume with a random name and size of 1GB. An +// error will be returned if the volume was unable to be created. +func CreateVolume(t *testing.T, client *gophercloud.ServiceClient) (*volumes.Volume, error) { + if testing.Short() { + t.Skip("Skipping test that requires volume creation in short mode.") + } + + volumeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume: %s", volumeName) + + createOpts := volumes.CreateOpts{ + Size: 1, + Name: volumeName, + } + + volume, err := volumes.Create(client, createOpts).Extract() + if err != nil { + return volume, err + } + + err = volumes.WaitForStatus(client, volume.ID, "available", 60) + if err != nil { + return volume, err + } + + return volume, nil +} + +// CreateVolumeType will create a volume type with a random name. An error will +// be returned if the volume type was unable to be created. +func CreateVolumeType(t *testing.T, client *gophercloud.ServiceClient) (*volumetypes.VolumeType, error) { + volumeTypeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume type: %s", volumeTypeName) + + createOpts := volumetypes.CreateOpts{ + Name: volumeTypeName, + ExtraSpecs: map[string]interface{}{ + "capabilities": "ssd", + "priority": 3, + }, + } + + volumeType, err := volumetypes.Create(client, createOpts).Extract() + if err != nil { + return volumeType, err + } + + return volumeType, nil +} + +// DeleteSnapshot will delete a snapshot. A fatal error will occur if the +// snapshot failed to be deleted. This works best when used as a deferred +// function. 
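+// A minimal sketch of the pairing used by the tests in this package, where
+// client and volume are assumed to come from the other v1 helpers (the
+// delete helper below is named DeleteSnapshotshot):
+//
+//	snapshot, err := CreateSnapshot(t, client, volume)
+//	if err != nil {
+//		t.Fatalf("Unable to create snapshot: %v", err)
+//	}
+//	defer DeleteSnapshotshot(t, client, snapshot)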
+func DeleteSnapshotshot(t *testing.T, client *gophercloud.ServiceClient, snapshot *snapshots.Snapshot) { + err := snapshots.Delete(client, snapshot.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete snapshot %s: %v", snapshot.ID, err) + } + + // Volumes can't be deleted until their snapshots have been, + // so block up to 120 seconds for the snapshot to delete. + err = gophercloud.WaitFor(120, func() (bool, error) { + _, err := snapshots.Get(client, snapshot.ID).Extract() + if err != nil { + return true, nil + } + + return false, nil + }) + if err != nil { + t.Fatalf("Unable to wait for snapshot to delete: %v", err) + } + + t.Logf("Deleted snapshot: %s", snapshot.ID) +} + +// DeleteVolume will delete a volume. A fatal error will occur if the volume +// failed to be deleted. This works best when used as a deferred function. +func DeleteVolume(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) { + err := volumes.Delete(client, volume.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete volume %s: %v", volume.ID, err) + } + + t.Logf("Deleted volume: %s", volume.ID) +} + +// DeleteVolumeType will delete a volume type. A fatal error will occur if the +// volume type failed to be deleted. This works best when used as a deferred +// function. +func DeleteVolumeType(t *testing.T, client *gophercloud.ServiceClient, volumeType *volumetypes.VolumeType) { + err := volumetypes.Delete(client, volumeType.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete volume type %s: %v", volumeType.ID, err) + } + + t.Logf("Deleted volume type: %s", volumeType.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/pkg.go new file mode 100644 index 000000000000..4efa6fbf1eee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/pkg.go @@ -0,0 +1,2 @@ +// Package v1 contains openstack cinder acceptance tests +package v1 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go new file mode 100644 index 000000000000..354537187a64 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/snapshots_test.go @@ -0,0 +1,58 @@ +// +build acceptance blockstorage + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots" +) + +func TestSnapshotsList(t *testing.T) { + client, err := clients.NewBlockStorageV1Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + allPages, err := snapshots.List(client, snapshots.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve snapshots: %v", err) + } + + allSnapshots, err := snapshots.ExtractSnapshots(allPages) + if err != nil { + t.Fatalf("Unable to extract snapshots: %v", err) + } + + for _, snapshot := range allSnapshots { + tools.PrintResource(t, snapshot) + } +} + +func TestSnapshotsCreateDelete(t *testing.T) { + client, err := clients.NewBlockStorageV1Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + volume, err := CreateVolume(t, client) + if err != nil { + t.Fatalf("Unable to create volume: 
%v", err) + } + defer DeleteVolume(t, client, volume) + + snapshot, err := CreateSnapshot(t, client, volume) + if err != nil { + t.Fatalf("Unable to create snapshot: %v", err) + } + defer DeleteSnapshotshot(t, client, snapshot) + + newSnapshot, err := snapshots.Get(client, snapshot.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve snapshot: %v", err) + } + + tools.PrintResource(t, newSnapshot) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go new file mode 100644 index 000000000000..9a555009fb1a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/volumes_test.go @@ -0,0 +1,52 @@ +// +build acceptance blockstorage + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" +) + +func TestVolumesList(t *testing.T) { + client, err := clients.NewBlockStorageV1Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + allPages, err := volumes.List(client, volumes.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve volumes: %v", err) + } + + allVolumes, err := volumes.ExtractVolumes(allPages) + if err != nil { + t.Fatalf("Unable to extract volumes: %v", err) + } + + for _, volume := range allVolumes { + tools.PrintResource(t, volume) + } +} + +func TestVolumesCreateDestroy(t *testing.T) { + client, err := clients.NewBlockStorageV1Client() + if err != nil { + t.Fatalf("Unable to create blockstorage client: %v", err) + } + + volume, err := CreateVolume(t, client) + if err != nil { + t.Fatalf("Unable to create volume: %v", err) + } + defer DeleteVolume(t, client, volume) + + newVolume, err := volumes.Get(client, volume.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve volume: %v", err) + } + + tools.PrintResource(t, newVolume) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go new file mode 100644 index 000000000000..ace09bc4d03f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v1/volumetypes_test.go @@ -0,0 +1,47 @@ +// +build acceptance blockstorage + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes" +) + +func TestVolumeTypesList(t *testing.T) { + client, err := clients.NewBlockStorageV1Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + allPages, err := volumetypes.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve volume types: %v", err) + } + + allVolumeTypes, err := volumetypes.ExtractVolumeTypes(allPages) + if err != nil { + t.Fatalf("Unable to extract volume types: %v", err) + } + + for _, volumeType := range allVolumeTypes { + tools.PrintResource(t, volumeType) + } +} + +func TestVolumeTypesCreateDestroy(t *testing.T) { + client, err := clients.NewBlockStorageV1Client() + if err != nil { + t.Fatalf("Unable to create a blockstorage client: %v", err) + } + + volumeType, err := 
CreateVolumeType(t, client) + if err != nil { + t.Fatalf("Unable to create volume type: %v", err) + } + defer DeleteVolumeType(t, client, volumeType) + + tools.PrintResource(t, volumeType) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/blockstorage.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/blockstorage.go new file mode 100644 index 000000000000..5b6ea75e065a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/blockstorage.go @@ -0,0 +1,154 @@ +// Package v2 contains common functions for creating block storage based +// resources for use in acceptance tests. See the `*_test.go` files for +// example usages. +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// CreateSnapshot will create a snapshot of the specified volume. +// Snapshot will be assigned a random name and description. +func CreateSnapshot(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) (*snapshots.Snapshot, error) { + snapshotName := tools.RandomString("ACPTTEST", 16) + snapshotDescription := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create snapshot: %s", snapshotName) + + createOpts := snapshots.CreateOpts{ + VolumeID: volume.ID, + Name: snapshotName, + Description: snapshotDescription, + } + + snapshot, err := snapshots.Create(client, createOpts).Extract() + if err != nil { + return snapshot, err + } + + err = snapshots.WaitForStatus(client, snapshot.ID, "available", 60) + if err != nil { + return snapshot, err + } + + t.Logf("Successfully created snapshot: %s", snapshot.ID) + + return snapshot, nil +} + +// CreateVolume will create a volume with a random name and size of 1GB. An +// error will be returned if the volume was unable to be created. +func CreateVolume(t *testing.T, client *gophercloud.ServiceClient) (*volumes.Volume, error) { + volumeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume: %s", volumeName) + + createOpts := volumes.CreateOpts{ + Size: 1, + Name: volumeName, + } + + volume, err := volumes.Create(client, createOpts).Extract() + if err != nil { + return volume, err + } + + err = volumes.WaitForStatus(client, volume.ID, "available", 60) + if err != nil { + return volume, err + } + + th.AssertEquals(t, volume.Name, volumeName) + th.AssertEquals(t, volume.Size, 1) + + tools.PrintResource(t, volume) + + t.Logf("Successfully created volume: %s", volume.ID) + + return volume, nil +} + +// CreateVolumeFromImage will create a volume from with a random name and size of +// 1GB. An error will be returned if the volume was unable to be created. 
+func CreateVolumeFromImage(t *testing.T, client *gophercloud.ServiceClient) (*volumes.Volume, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + volumeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume: %s", volumeName) + + createOpts := volumes.CreateOpts{ + Size: 1, + Name: volumeName, + ImageID: choices.ImageID, + } + + volume, err := volumes.Create(client, createOpts).Extract() + if err != nil { + return volume, err + } + + err = volumes.WaitForStatus(client, volume.ID, "available", 60) + if err != nil { + return volume, err + } + + newVolume, err := volumes.Get(client, volume.ID).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, newVolume.Name, volumeName) + th.AssertEquals(t, newVolume.Size, 1) + + t.Logf("Successfully created volume from image: %s", newVolume.ID) + + return newVolume, nil +} + +// DeleteVolume will delete a volume. A fatal error will occur if the volume +// failed to be deleted. This works best when used as a deferred function. +func DeleteVolume(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) { + t.Logf("Attempting to delete volume: %s", volume.ID) + + err := volumes.Delete(client, volume.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete volume %s: %v", volume.ID, err) + } + + t.Logf("Successfully deleted volume: %s", volume.ID) +} + +// DeleteSnapshot will delete a snapshot. A fatal error will occur if the +// snapshot failed to be deleted. +func DeleteSnapshot(t *testing.T, client *gophercloud.ServiceClient, snapshot *snapshots.Snapshot) { + t.Logf("Attempting to delete snapshot: %s", snapshot.ID) + + err := snapshots.Delete(client, snapshot.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete snapshot %s: %+v", snapshot.ID, err) + } + + // Volumes can't be deleted until their snapshots have been, + // so block up to 120 seconds for the snapshot to delete. + err = gophercloud.WaitFor(120, func() (bool, error) { + _, err := snapshots.Get(client, snapshot.ID).Extract() + if err != nil { + return true, nil + } + + return false, nil + }) + if err != nil { + t.Fatalf("Error waiting for snapshot to delete: %v", err) + } + + t.Logf("Successfully deleted snapshot: %s", snapshot.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/pkg.go new file mode 100644 index 000000000000..31dd0ffcb0ab --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/pkg.go @@ -0,0 +1,3 @@ +// The v2 package contains acceptance tests for the Openstack Cinder V2 service. 
+ +package v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/snapshots_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/snapshots_test.go new file mode 100644 index 000000000000..a24f4dcefff8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/snapshots_test.go @@ -0,0 +1,46 @@ +// +build acceptance blockstorage + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestSnapshots(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewBlockStorageV2Client() + th.AssertNoErr(t, err) + + volume, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + defer DeleteVolume(t, client, volume) + + snapshot, err := CreateSnapshot(t, client, volume) + th.AssertNoErr(t, err) + defer DeleteSnapshot(t, client, snapshot) + + newSnapshot, err := snapshots.Get(client, snapshot.ID).Extract() + th.AssertNoErr(t, err) + + allPages, err := snapshots.List(client, snapshots.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + allSnapshots, err := snapshots.ExtractSnapshots(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allSnapshots { + tools.PrintResource(t, snapshot) + if v.ID == newSnapshot.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/volumes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/volumes_test.go new file mode 100644 index 000000000000..03f659f5e1d7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2/volumes_test.go @@ -0,0 +1,61 @@ +// +build acceptance blockstorage + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestVolumesCreateDestroy(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewBlockStorageV2Client() + th.AssertNoErr(t, err) + + volume, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + defer DeleteVolume(t, client, volume) + + newVolume, err := volumes.Get(client, volume.ID).Extract() + th.AssertNoErr(t, err) + + allPages, err := volumes.List(client, volumes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + allVolumes, err := volumes.ExtractVolumes(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allVolumes { + tools.PrintResource(t, volume) + if v.ID == newVolume.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestVolumesCreateForceDestroy(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewBlockStorageV2Client() + th.AssertNoErr(t, err) + + volume, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + + newVolume, err := volumes.Get(client, volume.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newVolume) + + err = volumeactions.ForceDelete(client, newVolume.ID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/blockstorage.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/blockstorage.go new file mode 100644 index 000000000000..aee4fcea01a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/blockstorage.go @@ -0,0 +1,153 @@ +// Package v3 contains common functions for creating block storage based +// resources for use in acceptance tests. See the `*_test.go` files for +// example usages. +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// CreateSnapshot will create a snapshot of the specified volume. +// Snapshot will be assigned a random name and description. +func CreateSnapshot(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) (*snapshots.Snapshot, error) { + snapshotName := tools.RandomString("ACPTTEST", 16) + snapshotDescription := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create snapshot: %s", snapshotName) + + createOpts := snapshots.CreateOpts{ + VolumeID: volume.ID, + Name: snapshotName, + Description: snapshotDescription, + } + + snapshot, err := snapshots.Create(client, createOpts).Extract() + if err != nil { + return snapshot, err + } + + err = snapshots.WaitForStatus(client, snapshot.ID, "available", 60) + if err != nil { + return snapshot, err + } + + th.AssertEquals(t, snapshot.Name, snapshotName) + th.AssertEquals(t, snapshot.VolumeID, volume.ID) + tools.PrintResource(t, snapshot) + + t.Logf("Successfully created snapshot: %s", snapshot.ID) + + return snapshot, nil +} + +// CreateVolume will create a volume with a random name and size of 1GB. An +// error will be returned if the volume was unable to be created. +func CreateVolume(t *testing.T, client *gophercloud.ServiceClient) (*volumes.Volume, error) { + volumeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume: %s", volumeName) + + createOpts := volumes.CreateOpts{ + Size: 1, + Name: volumeName, + } + + volume, err := volumes.Create(client, createOpts).Extract() + if err != nil { + return volume, err + } + + err = volumes.WaitForStatus(client, volume.ID, "available", 60) + if err != nil { + return volume, err + } + + th.AssertEquals(t, volume.Name, volumeName) + tools.PrintResource(t, volume) + + t.Logf("Successfully created volume: %s", volume.ID) + + return volume, nil +} + +// CreateVolumeType will create a volume type with a random name. An +// error will be returned if the volume was unable to be created. 
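+// A minimal sketch, matching how the v3 volume type test below consumes this
+// helper; that test gates on clients.RequireAdmin, since managing volume
+// types typically needs administrative credentials. client and vt are
+// placeholder names:
+//
+//	vt, err := CreateVolumeType(t, client)
+//	th.AssertNoErr(t, err)
+//	defer DeleteVolumeType(t, client, vt)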
+func CreateVolumeType(t *testing.T, client *gophercloud.ServiceClient) (*volumetypes.VolumeType, error) { + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create volume type: %s", name) + + createOpts := volumetypes.CreateOpts{ + Name: name, + ExtraSpecs: map[string]string{"volume_backend_name": "fake_backend_name"}, + Description: "create_from_gophercloud", + } + + vt, err := volumetypes.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, vt.IsPublic, true) + tools.PrintResource(t, vt) + + t.Logf("Successfully created volume type: %s", vt.ID) + + return vt, nil +} + +// DeleteSnapshot will delete a snapshot. A fatal error will occur if the +// snapshot failed to be deleted. +func DeleteSnapshot(t *testing.T, client *gophercloud.ServiceClient, snapshot *snapshots.Snapshot) { + err := snapshots.Delete(client, snapshot.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete snapshot %s: %+v", snapshot.ID, err) + } + + // Volumes can't be deleted until their snapshots have been, + // so block up to 120 seconds for the snapshot to delete. + err = gophercloud.WaitFor(120, func() (bool, error) { + _, err := snapshots.Get(client, snapshot.ID).Extract() + if err != nil { + return true, nil + } + + return false, nil + }) + if err != nil { + t.Fatalf("Error waiting for snapshot to delete: %v", err) + } + + t.Logf("Deleted snapshot: %s", snapshot.ID) +} + +// DeleteVolume will delete a volume. A fatal error will occur if the volume +// failed to be deleted. This works best when used as a deferred function. +func DeleteVolume(t *testing.T, client *gophercloud.ServiceClient, volume *volumes.Volume) { + t.Logf("Attempting to delete volume: %s", volume.ID) + + err := volumes.Delete(client, volume.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete volume %s: %v", volume.ID, err) + } + + t.Logf("Successfully deleted volume: %s", volume.ID) +} + +// DeleteVolumeType will delete a volume type. A fatal error will occur if the +// volume type failed to be deleted. This works best when used as a deferred +// function. +func DeleteVolumeType(t *testing.T, client *gophercloud.ServiceClient, vt *volumetypes.VolumeType) { + t.Logf("Attempting to delete volume type: %s", vt.ID) + + err := volumetypes.Delete(client, vt.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete volume %s: %v", vt.ID, err) + } + + t.Logf("Successfully deleted volume type: %s", vt.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/pkg.go new file mode 100644 index 000000000000..b86245c710f4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/pkg.go @@ -0,0 +1,3 @@ +// The v3 package contains acceptance tests for the OpenStack Cinder V3 service. 
+ +package v3 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/quotaset_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/quotaset_test.go new file mode 100644 index 000000000000..f69e6866b85d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/quotaset_test.go @@ -0,0 +1,145 @@ +// +build acceptance quotasets + +package v3 + +import ( + "os" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestQuotasetGet(t *testing.T) { + clients.RequireAdmin(t) + + client, projectID := getClientAndProject(t) + + quotaSet, err := quotasets.Get(client, projectID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, quotaSet) +} + +func TestQuotasetGetDefaults(t *testing.T) { + clients.RequireAdmin(t) + + client, projectID := getClientAndProject(t) + + quotaSet, err := quotasets.GetDefaults(client, projectID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, quotaSet) +} + +func TestQuotasetGetUsage(t *testing.T) { + clients.RequireAdmin(t) + + client, projectID := getClientAndProject(t) + + quotaSetUsage, err := quotasets.GetUsage(client, projectID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, quotaSetUsage) +} + +var UpdateQuotaOpts = quotasets.UpdateOpts{ + Volumes: gophercloud.IntToPointer(100), + Snapshots: gophercloud.IntToPointer(200), + Gigabytes: gophercloud.IntToPointer(300), + PerVolumeGigabytes: gophercloud.IntToPointer(50), + Backups: gophercloud.IntToPointer(2), + BackupGigabytes: gophercloud.IntToPointer(300), +} + +var UpdatedQuotas = quotasets.QuotaSet{ + Volumes: 100, + Snapshots: 200, + Gigabytes: 300, + PerVolumeGigabytes: 50, + Backups: 2, + BackupGigabytes: 300, +} + +func TestQuotasetUpdate(t *testing.T) { + clients.RequireAdmin(t) + + client, projectID := getClientAndProject(t) + + // save original quotas + orig, err := quotasets.Get(client, projectID).Extract() + th.AssertNoErr(t, err) + + defer func() { + restore := quotasets.UpdateOpts{} + FillUpdateOptsFromQuotaSet(*orig, &restore) + + _, err = quotasets.Update(client, projectID, restore).Extract() + th.AssertNoErr(t, err) + }() + + // test Update + resultQuotas, err := quotasets.Update(client, projectID, UpdateQuotaOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, UpdatedQuotas, *resultQuotas) + + // We dont know the default quotas, so just check if the quotas are not the + // same as before + newQuotas, err := quotasets.Get(client, projectID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, resultQuotas.Volumes, newQuotas.Volumes) +} + +func TestQuotasetDelete(t *testing.T) { + clients.RequireAdmin(t) + + client, projectID := getClientAndProject(t) + + // save original quotas + orig, err := quotasets.Get(client, projectID).Extract() + th.AssertNoErr(t, err) + + defer func() { + restore := quotasets.UpdateOpts{} + FillUpdateOptsFromQuotaSet(*orig, &restore) + + _, err = quotasets.Update(client, projectID, restore).Extract() + th.AssertNoErr(t, err) + }() + + // Obtain environment default quotaset values to validate deletion. 
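+	// Deleting the project's quota set is expected to reset it to these
+	// defaults, so the Get call after the Delete below should report the
+	// default values again (the assertion checks the Volumes field).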
+ defaultQuotaSet, err := quotasets.GetDefaults(client, projectID).Extract() + th.AssertNoErr(t, err) + + // Test Delete + err = quotasets.Delete(client, projectID).ExtractErr() + th.AssertNoErr(t, err) + + newQuotas, err := quotasets.Get(client, projectID).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, newQuotas.Volumes, defaultQuotaSet.Volumes) +} + +// getClientAndProject reduces boilerplate by returning a new blockstorage v3 +// ServiceClient and a project ID obtained from the OS_PROJECT_NAME envvar. +func getClientAndProject(t *testing.T) (*gophercloud.ServiceClient, string) { + client, err := clients.NewBlockStorageV3Client() + th.AssertNoErr(t, err) + + projectID := os.Getenv("OS_PROJECT_NAME") + th.AssertNoErr(t, err) + return client, projectID +} + +func FillUpdateOptsFromQuotaSet(src quotasets.QuotaSet, dest *quotasets.UpdateOpts) { + dest.Volumes = &src.Volumes + dest.Snapshots = &src.Snapshots + dest.Gigabytes = &src.Gigabytes + dest.PerVolumeGigabytes = &src.PerVolumeGigabytes + dest.Backups = &src.Backups + dest.BackupGigabytes = &src.BackupGigabytes +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/snapshots_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/snapshots_test.go new file mode 100644 index 000000000000..60bfb70597ef --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/snapshots_test.go @@ -0,0 +1,58 @@ +// +build acceptance blockstorage + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestSnapshots(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewBlockStorageV3Client() + th.AssertNoErr(t, err) + + volume1, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + defer DeleteVolume(t, client, volume1) + + snapshot1, err := CreateSnapshot(t, client, volume1) + th.AssertNoErr(t, err) + defer DeleteSnapshot(t, client, snapshot1) + + volume2, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + defer DeleteVolume(t, client, volume2) + + snapshot2, err := CreateSnapshot(t, client, volume2) + th.AssertNoErr(t, err) + defer DeleteSnapshot(t, client, snapshot2) + + listOpts := snapshots.ListOpts{ + Limit: 1, + } + + err = snapshots.List(client, listOpts).EachPage(func(page pagination.Page) (bool, error) { + actual, err := snapshots.ExtractSnapshots(page) + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, len(actual)) + + var found bool + for _, v := range actual { + if v.ID == snapshot1.ID || v.ID == snapshot2.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + return true, nil + }) + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/volumes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/volumes_test.go new file mode 100644 index 000000000000..32ad756c0a69 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/volumes_test.go @@ -0,0 +1,50 @@ +// +build acceptance blockstorage + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" + "github.com/gophercloud/gophercloud/pagination" + th 
"github.com/gophercloud/gophercloud/testhelper" +) + +func TestVolumes(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewBlockStorageV3Client() + th.AssertNoErr(t, err) + + volume1, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + defer DeleteVolume(t, client, volume1) + + volume2, err := CreateVolume(t, client) + th.AssertNoErr(t, err) + defer DeleteVolume(t, client, volume2) + + listOpts := volumes.ListOpts{ + Limit: 1, + } + + err = volumes.List(client, listOpts).EachPage(func(page pagination.Page) (bool, error) { + actual, err := volumes.ExtractVolumes(page) + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, len(actual)) + + var found bool + for _, v := range actual { + if v.ID == volume1.ID || v.ID == volume2.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + return true, nil + }) + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/volumetypes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/volumetypes_test.go new file mode 100644 index 000000000000..635d054731d5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v3/volumetypes_test.go @@ -0,0 +1,52 @@ +// +build acceptance blockstorage + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestVolumeTypes(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewBlockStorageV3Client() + th.AssertNoErr(t, err) + + vt, err := CreateVolumeType(t, client) + th.AssertNoErr(t, err) + defer DeleteVolumeType(t, client, vt) + + allPages, err := volumetypes.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allVolumeTypes, err := volumetypes.ExtractVolumeTypes(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allVolumeTypes { + tools.PrintResource(t, v) + if v.ID == vt.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + var isPublic = false + updateOpts := volumetypes.UpdateOpts{ + Name: vt.Name + "-UPDATED", + IsPublic: &isPublic, + } + + newVT, err := volumetypes.Update(client, vt.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, vt.Name+"-UPDATED", newVT.Name) + th.AssertEquals(t, false, newVT.IsPublic) + + tools.PrintResource(t, newVT) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/client_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/client_test.go new file mode 100644 index 000000000000..eed3a82ff465 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/client_test.go @@ -0,0 +1,86 @@ +// +build acceptance + +package openstack + +import ( + "os" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" +) + +func TestAuthenticatedClient(t *testing.T) { + // Obtain credentials from the environment. 
+ ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + t.Fatalf("Unable to acquire credentials: %v", err) + } + + client, err := openstack.AuthenticatedClient(ao) + if err != nil { + t.Fatalf("Unable to authenticate: %v", err) + } + + if client.TokenID == "" { + t.Errorf("No token ID assigned to the client") + } + + t.Logf("Client successfully acquired a token: %v", client.TokenID) + + // Find the storage service in the service catalog. + storage, err := openstack.NewObjectStorageV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + if err != nil { + t.Errorf("Unable to locate a storage service: %v", err) + } else { + t.Logf("Located a storage service at endpoint: [%s]", storage.Endpoint) + } +} + +func TestReauth(t *testing.T) { + ao, err := openstack.AuthOptionsFromEnv() + if err != nil { + t.Fatalf("Unable to obtain environment auth options: %v", err) + } + + // Allow reauth + ao.AllowReauth = true + + provider, err := openstack.NewClient(ao.IdentityEndpoint) + if err != nil { + t.Fatalf("Unable to create provider: %v", err) + } + + err = openstack.Authenticate(provider, ao) + if err != nil { + t.Fatalf("Unable to authenticate: %v", err) + } + + t.Logf("Creating a compute client") + _, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + if err != nil { + t.Fatalf("Unable to create compute client: %v", err) + } + + t.Logf("Sleeping for 1 second") + time.Sleep(1 * time.Second) + t.Logf("Attempting to reauthenticate") + + err = provider.ReauthFunc() + if err != nil { + t.Fatalf("Unable to reauthenticate: %v", err) + } + + t.Logf("Creating a compute client") + _, err = openstack.NewComputeV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + if err != nil { + t.Fatalf("Unable to create compute client: %v", err) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/actions_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/actions_test.go new file mode 100644 index 000000000000..eca8fc62c62d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/actions_test.go @@ -0,0 +1,31 @@ +// +build acceptance clustering actions + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/actions" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestActionsList(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + opts := actions.ListOpts{ + Limit: 200, + } + + allPages, err := actions.List(client, opts).AllPages() + th.AssertNoErr(t, err) + + allActions, err := actions.ExtractActions(allPages) + th.AssertNoErr(t, err) + + for _, action := range allActions { + tools.PrintResource(t, action) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/clustering.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/clustering.go new file mode 100644 index 000000000000..4122beb906ae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/clustering.go @@ -0,0 +1,433 @@ +package v1 + +import ( + "fmt" + "net/http" + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + 
"github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/actions" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policies" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +var TestPolicySpec = policies.Spec{ + Description: "new policy description", + Properties: map[string]interface{}{ + "destroy_after_deletion": true, + "grace_period": 60, + "reduce_desired_capacity": false, + "criteria": "OLDEST_FIRST", + }, + Type: "senlin.policy.deletion", + Version: "1.1", +} + +// CreateCluster creates a random cluster. An error will be returned if +// the cluster could not be created. +func CreateCluster(t *testing.T, client *gophercloud.ServiceClient, profileID string) (*clusters.Cluster, error) { + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create cluster: %s", name) + + createOpts := clusters.CreateOpts{ + Name: name, + DesiredCapacity: 1, + ProfileID: profileID, + MinSize: new(int), + MaxSize: 20, + Timeout: 3600, + Metadata: map[string]interface{}{ + "foo": "bar", + "test": map[string]interface{}{ + "nil_interface": interface{}(nil), + "float_value": float64(123.3), + "string_value": "test_string", + "bool_value": false, + }, + }, + Config: map[string]interface{}{}, + } + + res := clusters.Create(client, createOpts) + if res.Err != nil { + return nil, res.Err + } + + requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, true, requestID != "") + t.Logf("Cluster %s request ID: %s", name, requestID) + + actionID, err := GetActionID(res.Header) + th.AssertNoErr(t, err) + th.AssertEquals(t, true, actionID != "") + t.Logf("Cluster %s action ID: %s", name, actionID) + + err = WaitForAction(client, actionID) + if err != nil { + return nil, err + } + + cluster, err := res.Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created cluster: %s", cluster.ID) + + tools.PrintResource(t, cluster) + tools.PrintResource(t, cluster.CreatedAt) + + th.AssertEquals(t, name, cluster.Name) + th.AssertEquals(t, profileID, cluster.ProfileID) + + return cluster, nil +} + +// CreateNode creates a random node. An error will be returned if +// the node could not be created. 
+func CreateNode(t *testing.T, client *gophercloud.ServiceClient, clusterID, profileID string) (*nodes.Node, error) { + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create node: %s", name) + + createOpts := nodes.CreateOpts{ + ClusterID: clusterID, + Metadata: map[string]interface{}{ + "foo": "bar", + "test": map[string]interface{}{ + "nil_interface": interface{}(nil), + "float_value": float64(123.3), + "string_value": "test_string", + "bool_value": false, + }, + }, + Name: name, + ProfileID: profileID, + Role: "", + } + + res := nodes.Create(client, createOpts) + if res.Err != nil { + return nil, res.Err + } + + requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, true, requestID != "") + t.Logf("Node %s request ID: %s", name, requestID) + + actionID, err := GetActionID(res.Header) + th.AssertNoErr(t, err) + th.AssertEquals(t, true, actionID != "") + t.Logf("Node %s action ID: %s", name, actionID) + + err = WaitForAction(client, actionID) + if err != nil { + return nil, err + } + + node, err := res.Extract() + if err != nil { + return nil, err + } + + err = WaitForNodeStatus(client, node.ID, "ACTIVE") + if err != nil { + return nil, err + } + + t.Logf("Successfully created node: %s", node.ID) + + node, err = nodes.Get(client, node.ID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, node) + tools.PrintResource(t, node.CreatedAt) + + th.AssertEquals(t, profileID, node.ProfileID) + th.AssertEquals(t, clusterID, node.ClusterID) + th.AssertDeepEquals(t, createOpts.Metadata, node.Metadata) + + return node, nil +} + +// CreatePolicy creates a random policy. An error will be returned if the +// policy could not be created. +func CreatePolicy(t *testing.T, client *gophercloud.ServiceClient) (*policies.Policy, error) { + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create policy: %s", name) + + createOpts := policies.CreateOpts{ + Name: name, + Spec: TestPolicySpec, + } + + res := policies.Create(client, createOpts) + if res.Err != nil { + return nil, res.Err + } + + requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, true, requestID != "") + + t.Logf("Policy %s request ID: %s", name, requestID) + + policy, err := res.Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created policy: %s", policy.ID) + + tools.PrintResource(t, policy) + tools.PrintResource(t, policy.CreatedAt) + + th.AssertEquals(t, name, policy.Name) + + return policy, nil +} + +// CreateProfile will create a random profile. An error will be returned if the +// profile could not be created. 
+func CreateProfile(t *testing.T, client *gophercloud.ServiceClient) (*profiles.Profile, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + return nil, err + } + + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create profile: %s", name) + + networks := []map[string]interface{}{ + {"network": choices.NetworkName}, + } + + props := map[string]interface{}{ + "name": name, + "flavor": choices.FlavorID, + "image": choices.ImageID, + "networks": networks, + "security_groups": "", + } + + createOpts := profiles.CreateOpts{ + Name: name, + Spec: profiles.Spec{ + Type: "os.nova.server", + Version: "1.0", + Properties: props, + }, + } + + res := profiles.Create(client, createOpts) + if res.Err != nil { + return nil, res.Err + } + + requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, true, requestID != "") + + t.Logf("Profile %s request ID: %s", name, requestID) + + profile, err := res.Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created profile: %s", profile.ID) + + tools.PrintResource(t, profile) + tools.PrintResource(t, profile.CreatedAt) + + th.AssertEquals(t, name, profile.Name) + th.AssertEquals(t, profile.Spec.Type, "os.nova.server") + th.AssertEquals(t, profile.Spec.Version, "1.0") + + return profile, nil +} + +// CreateReceiver will create a random profile. An error will be returned if the +// profile could not be created. +func CreateReceiver(t *testing.T, client *gophercloud.ServiceClient, clusterID string) (*receivers.Receiver, error) { + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create receiver: %s", name) + + createOpts := receivers.CreateOpts{ + Name: name, + ClusterID: clusterID, + Type: receivers.WebhookReceiver, + Action: "CLUSTER_SCALE_OUT", + } + + res := receivers.Create(client, createOpts) + if res.Err != nil { + return nil, res.Err + } + + receiver, err := res.Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created receiver: %s", receiver.ID) + + tools.PrintResource(t, receiver) + tools.PrintResource(t, receiver.CreatedAt) + + th.AssertEquals(t, name, receiver.Name) + th.AssertEquals(t, createOpts.Action, receiver.Action) + + return receiver, nil +} + +// DeleteCluster will delete a given policy. A fatal error will occur if the +// cluster could not be deleted. This works best as a deferred function. +func DeleteCluster(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete cluster: %s", id) + + res := clusters.Delete(client, id) + if res.Err != nil { + t.Fatalf("Error deleting cluster %s: %s:", id, res.Err) + } + + actionID, err := GetActionID(res.Header) + if err != nil { + t.Fatalf("Error deleting cluster %s: %s:", id, res.Err) + } + + err = WaitForAction(client, actionID) + if err != nil { + t.Fatalf("Error deleting cluster %s: %s:", id, res.Err) + } + + t.Logf("Successfully deleted cluster: %s", id) + + return +} + +// DeleteNode will delete a given node. A fatal error will occur if the +// node could not be deleted. This works best as a deferred function. 
+func DeleteNode(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete node: %s", id) + + err := nodes.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Error deleting node %s: %s:", id, err) + } + + err = WaitForNodeStatus(client, id, "DELETED") + if err != nil { + t.Fatalf("Error deleting node %s: %s", id, err) + } + + t.Logf("Successfully deleted node: %s", id) + + return +} + +// DeletePolicy will delete a given policy. A fatal error will occur if the +// policy could not be deleted. This works best as a deferred function. +func DeletePolicy(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete policy: %s", id) + + err := policies.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Error deleting policy %s: %s:", id, err) + } + + t.Logf("Successfully deleted policy: %s", id) + + return +} + +// DeleteProfile will delete a given profile. A fatal error will occur if the +// profile could not be deleted. This works best as a deferred function. +func DeleteProfile(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete profile: %s", id) + + err := profiles.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Error deleting profile %s: %s:", id, err) + } + + t.Logf("Successfully deleted profile: %s", id) + + return +} + +// DeleteReceiver will delete a given receiver. A fatal error will occur if the +// receiver could not be deleted. This works best as a deferred function. +func DeleteReceiver(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete Receiver: %s", id) + + res := receivers.Delete(client, id) + if res.Err != nil { + t.Fatalf("Error deleting receiver %s: %s:", id, res.Err) + } + + t.Logf("Successfully deleted receiver: %s", id) + + return +} + +// GetActionID parses an HTTP header and returns the action ID. 
+func GetActionID(headers http.Header) (string, error) { + location := headers.Get("Location") + v := strings.Split(location, "actions/") + if len(v) < 2 { + return "", fmt.Errorf("unable to determine action ID") + } + + actionID := v[1] + + return actionID, nil +} + +func WaitForAction(client *gophercloud.ServiceClient, actionID string) error { + return tools.WaitFor(func() (bool, error) { + action, err := actions.Get(client, actionID).Extract() + if err != nil { + return false, err + } + + if action.Status == "SUCCEEDED" { + return true, nil + } + + if action.Status == "FAILED" { + return false, fmt.Errorf("Action %s in FAILED state", actionID) + } + + return false, nil + }) +} + +func WaitForNodeStatus(client *gophercloud.ServiceClient, id string, status string) error { + return tools.WaitFor(func() (bool, error) { + latest, err := nodes.Get(client, id).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok && status == "DELETED" { + return true, nil + } + + return false, err + } + + if latest.Status == status { + return true, nil + } + + if latest.Status == "ERROR" { + return false, fmt.Errorf("Node %s in ERROR state", id) + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/clusters_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/clusters_test.go new file mode 100644 index 000000000000..ba759b9bd632 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/clusters_test.go @@ -0,0 +1,267 @@ +// +build acceptance clustering policies + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestClustersCRUD(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + // Test clusters list + allPages, err := clusters.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allClusters, err := clusters.ExtractClusters(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allClusters { + if v.ID == cluster.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + // Test cluster update + updateOpts := clusters.UpdateOpts{ + Name: cluster.Name + "-UPDATED", + } + + res := clusters.Update(client, cluster.ID, updateOpts) + th.AssertNoErr(t, res.Err) + + actionID, err := GetActionID(res.Header) + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + newCluster, err := clusters.Get(client, cluster.ID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, newCluster.Name, cluster.Name+"-UPDATED") + + tools.PrintResource(t, newCluster) + + // Test cluster health + actionID, err = clusters.Check(client, cluster.ID).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) +} + +func TestClustersResize(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, 
err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + iTrue := true + resizeOpts := clusters.ResizeOpts{ + AdjustmentType: clusters.ChangeInCapacityAdjustment, + Number: 1, + Strict: &iTrue, + } + + actionID, err := clusters.Resize(client, cluster.ID, resizeOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + newCluster, err := clusters.Get(client, cluster.ID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, newCluster.DesiredCapacity, 2) + + tools.PrintResource(t, newCluster) +} + +func TestClustersScale(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + // increase cluster size to 2 + scaleOutOpts := clusters.ScaleOutOpts{ + Count: 1, + } + actionID, err := clusters.ScaleOut(client, cluster.ID, scaleOutOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + newCluster, err := clusters.Get(client, cluster.ID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, newCluster.DesiredCapacity, 2) + + // reduce cluster size to 0 + count := 2 + scaleInOpts := clusters.ScaleInOpts{ + Count: &count, + } + + actionID, err = clusters.ScaleIn(client, cluster.ID, scaleInOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + newCluster, err = clusters.Get(client, cluster.ID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, newCluster.DesiredCapacity, 0) + + tools.PrintResource(t, newCluster) +} + +func TestClustersPolicies(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + client.Microversion = "1.5" + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + policy, err := CreatePolicy(t, client) + th.AssertNoErr(t, err) + defer DeletePolicy(t, client, policy.ID) + + iTrue := true + attachPolicyOpts := clusters.AttachPolicyOpts{ + PolicyID: policy.ID, + Enabled: &iTrue, + } + + actionID, err := clusters.AttachPolicy(client, cluster.ID, attachPolicyOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + // List all policies in the cluster to see if the policy was + // successfully attached. 
+ allPages, err := clusters.ListPolicies(client, cluster.ID, nil).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err := clusters.ExtractClusterPolicies(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allPolicies { + tools.PrintResource(t, v) + if v.PolicyID == policy.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + // Set the policy to disabled + iFalse := false + updatePolicyOpts := clusters.UpdatePolicyOpts{ + PolicyID: policy.ID, + Enabled: &iFalse, + } + + actionID, err = clusters.UpdatePolicy(client, cluster.ID, updatePolicyOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + clusterPolicy, err := clusters.GetPolicy(client, cluster.ID, policy.ID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, clusterPolicy.Enabled, false) + + // Detach the policy + detachPolicyOpts := clusters.DetachPolicyOpts{ + PolicyID: policy.ID, + } + + actionID, err = clusters.DetachPolicy(client, cluster.ID, detachPolicyOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + // List all policies in the cluster to see if the policy was + // successfully detached. + allPages, err = clusters.ListPolicies(client, cluster.ID, nil).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err = clusters.ExtractClusterPolicies(allPages) + th.AssertNoErr(t, err) + + found = false + for _, v := range allPolicies { + tools.PrintResource(t, v) + if v.PolicyID == policy.ID { + found = true + } + } + + th.AssertEquals(t, found, false) +} + +func TestClustersRecovery(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + recoverOpts := clusters.RecoverOpts{ + Operation: clusters.RebuildRecovery, + } + + actionID, err := clusters.Recover(client, cluster.ID, recoverOpts).Extract() + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + newCluster, err := clusters.Get(client, cluster.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newCluster) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/nodes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/nodes_test.go new file mode 100644 index 000000000000..0890d45ff1b5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/nodes_test.go @@ -0,0 +1,69 @@ +// +build acceptance clustering policies + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestNodesCRUD(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + node, err := CreateNode(t, client, cluster.ID, profile.ID) + th.AssertNoErr(t, err) + defer DeleteNode(t, client, node.ID) + + // Test nodes list + 
allPages, err := nodes.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allNodes, err := nodes.ExtractNodes(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allNodes { + if v.ID == node.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + // Test nodes update + t.Logf("Attempting to update node %s", node.ID) + + updateOpts := nodes.UpdateOpts{ + Metadata: map[string]interface{}{ + "bar": "baz", + }, + } + + res := nodes.Update(client, node.ID, updateOpts) + th.AssertNoErr(t, res.Err) + + actionID, err := GetActionID(res.Header) + th.AssertNoErr(t, err) + + err = WaitForAction(client, actionID) + th.AssertNoErr(t, err) + + node, err = nodes.Get(client, node.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, node) + tools.PrintResource(t, node.Metadata) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/pkg.go new file mode 100644 index 000000000000..e2200bc5ba8c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/pkg.go @@ -0,0 +1,2 @@ +// Package v1 package contains acceptance tests for the Openstack Clustering V1 service. +package v1 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/policies_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/policies_test.go new file mode 100644 index 000000000000..3d5835156764 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/policies_test.go @@ -0,0 +1,69 @@ +// +build acceptance clustering policies + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policies" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestPoliciesCRUD(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + client.Microversion = "1.5" + + policy, err := CreatePolicy(t, client) + th.AssertNoErr(t, err) + defer DeletePolicy(t, client, policy.ID) + + // Test listing policies + allPages, err := policies.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err := policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allPolicies { + if v.ID == policy.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + // Test Get policy + getPolicy, err := policies.Get(client, policy.ID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, getPolicy) + + // Test updating policy + updateOpts := policies.UpdateOpts{ + Name: policy.Name + "-UPDATE", + } + + t.Logf("Attempting to update policy: %s", policy.ID) + updatePolicy, err := policies.Update(client, policy.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, updatePolicy) + tools.PrintResource(t, updatePolicy.UpdatedAt) + + // Test validating policy + t.Logf("Attempting to validate policy: %s", policy.ID) + validateOpts := policies.ValidateOpts{ + Spec: TestPolicySpec, + } + + validatePolicy, err := policies.Validate(client, validateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, validatePolicy) + + th.AssertEquals(t, validatePolicy.Name, "validated_policy") + th.AssertEquals(t, validatePolicy.Spec.Version, TestPolicySpec.Version) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/policytypes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/policytypes_test.go new file mode 100644 index 000000000000..fdb42a315360 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/policytypes_test.go @@ -0,0 +1,64 @@ +// +build acceptance clustering policytypes + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestPolicyTypeList(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + allPages, err := policytypes.List(client).AllPages() + th.AssertNoErr(t, err) + + allPolicyTypes, err := policytypes.ExtractPolicyTypes(allPages) + th.AssertNoErr(t, err) + + for _, v := range allPolicyTypes { + tools.PrintResource(t, v) + } +} + +func TestPolicyTypeList_v_1_5(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + client.Microversion = "1.5" + allPages, err := policytypes.List(client).AllPages() + th.AssertNoErr(t, err) + + allPolicyTypes, err := policytypes.ExtractPolicyTypes(allPages) + th.AssertNoErr(t, err) + + for _, v := range allPolicyTypes { + tools.PrintResource(t, v) + } +} + +func TestPolicyTypeGet(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + policyType, err := policytypes.Get(client, "senlin.policy.batch-1.0").Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, policyType) +} + +func TestPolicyTypeGet_v_1_5(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + client.Microversion = "1.5" + policyType, err := policytypes.Get(client, "senlin.policy.batch-1.0").Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, policyType) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/profiles_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/profiles_test.go new file mode 100644 index 000000000000..f426f8323226 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/profiles_test.go @@ -0,0 +1,49 @@ +// +build acceptance clustering policies + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestProfilesCRUD(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + // Test listing profiles + allPages, err := profiles.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allProfiles, err := profiles.ExtractProfiles(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allProfiles { + if v.ID == profile.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + // Test updating profile + updateOpts := profiles.UpdateOpts{ + Name: profile.Name + "-UPDATED", + } + + newProfile, err := profiles.Update(client, profile.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + 
th.AssertEquals(t, newProfile.Name, profile.Name+"-UPDATED") + + tools.PrintResource(t, newProfile) + tools.PrintResource(t, newProfile.UpdatedAt) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/profiletypes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/profiletypes_test.go new file mode 100644 index 000000000000..039f926f5a8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/profiletypes_test.go @@ -0,0 +1,47 @@ +// +build acceptance clustering profiletypes + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestProfileTypesList(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + client.Microversion = "1.5" + + allPages, err := profiletypes.List(client).AllPages() + th.AssertNoErr(t, err) + + allProfileTypes, err := profiletypes.ExtractProfileTypes(allPages) + th.AssertNoErr(t, err) + + for _, profileType := range allProfileTypes { + tools.PrintResource(t, profileType) + } +} +func TestProfileTypesOpsList(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + client.Microversion = "1.5" + + profileTypeName := "os.nova.server-1.0" + allPages, err := profiletypes.ListOps(client, profileTypeName).AllPages() + th.AssertNoErr(t, err) + + ops, err := profiletypes.ExtractOps(allPages) + th.AssertNoErr(t, err) + + for k, v := range ops { + tools.PrintResource(t, k) + tools.PrintResource(t, v) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/receivers_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/receivers_test.go new file mode 100644 index 000000000000..ff0ca8eea26b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/receivers_test.go @@ -0,0 +1,59 @@ +// +build acceptance clustering policies + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestReceiversCRUD(t *testing.T) { + client, err := clients.NewClusteringV1Client() + th.AssertNoErr(t, err) + + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + receiver, err := CreateReceiver(t, client, cluster.ID) + th.AssertNoErr(t, err) + defer DeleteReceiver(t, client, receiver.ID) + + // Test listing receivers + allPages, err := receivers.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allReceivers, err := receivers.ExtractReceivers(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allReceivers { + if v.ID == receiver.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + // Test updating receivers + newName := receiver.Name + "-UPDATED" + updateOpts := receivers.UpdateOpts{ + Name: newName, + } + + receiver, err = receivers.Update(client, receiver.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + 
tools.PrintResource(t, receiver) + tools.PrintResource(t, receiver.UpdatedAt) + + th.AssertEquals(t, receiver.Name, newName) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/webhooktrigger_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/webhooktrigger_test.go new file mode 100644 index 000000000000..4b5434d1c153 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/clustering/v1/webhooktrigger_test.go @@ -0,0 +1,65 @@ +// +build acceptance clustering webhooks + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestClusteringWebhookTrigger(t *testing.T) { + + client, err := clients.NewClusteringV1Client() + if err != nil { + t.Fatalf("Unable to create clustering client: %v", err) + } + + opts := webhooks.TriggerOpts{ + V: "1", + } + + // create profile, cluster and receiver first + profile, err := CreateProfile(t, client) + th.AssertNoErr(t, err) + defer DeleteProfile(t, client, profile.ID) + + cluster, err := CreateCluster(t, client, profile.ID) + th.AssertNoErr(t, err) + defer DeleteCluster(t, client, cluster.ID) + + receiver, err := CreateReceiver(t, client, cluster.ID) + th.AssertNoErr(t, err) + defer DeleteReceiver(t, client, receiver.ID) + + // trigger webhook + actionID, err := webhooks.Trigger(client, receiver.ID, opts).Extract() + if err != nil { + t.Fatalf("Unable to extract webhooks trigger: %v", err) + } else { + t.Logf("Webhook trigger action id %s", actionID) + } + + err = WaitForAction(client, actionID) + if err != nil { + t.Fatalf("Error scaling out cluster %s as a result from webhook trigger: %s:", cluster.ID, err) + } + + // check that new node was created + nodelistopts := nodes.ListOpts{ + ClusterID: cluster.ID, + } + + allPages, err := nodes.List(client, nodelistopts).AllPages() + th.AssertNoErr(t, err) + + allNodes, err := nodes.ExtractNodes(allPages) + th.AssertNoErr(t, err) + + // there should be 2 nodes in the cluster after triggering webhook + th.AssertEquals(t, len(allNodes), 2) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/common.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/common.go new file mode 100644 index 000000000000..ba78cb635d98 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/common.go @@ -0,0 +1,19 @@ +// Package openstack contains common functions that can be used +// across all OpenStack components for acceptance testing. +package openstack + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/common/extensions" +) + +// PrintExtension prints an extension and all of its attributes. 
+func PrintExtension(t *testing.T, extension *extensions.Extension) { + t.Logf("Name: %s", extension.Name) + t.Logf("Namespace: %s", extension.Namespace) + t.Logf("Alias: %s", extension.Alias) + t.Logf("Description: %s", extension.Description) + t.Logf("Updated: %s", extension.Updated) + t.Logf("Links: %v", extension.Links) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/aggregates_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/aggregates_test.go new file mode 100644 index 000000000000..352adb38e3b7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/aggregates_test.go @@ -0,0 +1,146 @@ +// +build acceptance compute aggregates + +package v2 + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestAggregatesList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := aggregates.List(client).AllPages() + th.AssertNoErr(t, err) + + allAggregates, err := aggregates.ExtractAggregates(allPages) + th.AssertNoErr(t, err) + + for _, v := range allAggregates { + tools.PrintResource(t, v) + } +} + +func TestAggregatesCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + aggregate, err := CreateAggregate(t, client) + th.AssertNoErr(t, err) + + defer DeleteAggregate(t, client, aggregate) + + tools.PrintResource(t, aggregate) + + updateOpts := aggregates.UpdateOpts{ + Name: "new_aggregate_name", + AvailabilityZone: "new_azone", + } + + updatedAggregate, err := aggregates.Update(client, aggregate.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, aggregate) + + th.AssertEquals(t, updatedAggregate.Name, "new_aggregate_name") + th.AssertEquals(t, updatedAggregate.AvailabilityZone, "new_azone") +} + +func TestAggregatesAddRemoveHost(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + hostToAdd, err := getHypervisor(t, client) + th.AssertNoErr(t, err) + + aggregate, err := CreateAggregate(t, client) + th.AssertNoErr(t, err) + defer DeleteAggregate(t, client, aggregate) + + addHostOpts := aggregates.AddHostOpts{ + Host: hostToAdd.HypervisorHostname, + } + + aggregateWithNewHost, err := aggregates.AddHost(client, aggregate.ID, addHostOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, aggregateWithNewHost) + + th.AssertEquals(t, aggregateWithNewHost.Hosts[0], hostToAdd.HypervisorHostname) + + removeHostOpts := aggregates.RemoveHostOpts{ + Host: hostToAdd.HypervisorHostname, + } + + aggregateWithRemovedHost, err := aggregates.RemoveHost(client, aggregate.ID, removeHostOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, aggregateWithRemovedHost) + + th.AssertEquals(t, len(aggregateWithRemovedHost.Hosts), 0) +} + +func TestAggregatesSetRemoveMetadata(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + aggregate, err := CreateAggregate(t, client) + th.AssertNoErr(t, err) + defer DeleteAggregate(t, 
client, aggregate) + + opts := aggregates.SetMetadataOpts{ + Metadata: map[string]interface{}{"key": "value"}, + } + + aggregateWithMetadata, err := aggregates.SetMetadata(client, aggregate.ID, opts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, aggregateWithMetadata) + + if _, ok := aggregateWithMetadata.Metadata["key"]; !ok { + t.Fatalf("aggregate %s did not contain metadata", aggregateWithMetadata.Name) + } + + optsToRemove := aggregates.SetMetadataOpts{ + Metadata: map[string]interface{}{"key": nil}, + } + + aggregateWithRemovedKey, err := aggregates.SetMetadata(client, aggregate.ID, optsToRemove).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, aggregateWithRemovedKey) + + if _, ok := aggregateWithRemovedKey.Metadata["key"]; ok { + t.Fatalf("aggregate %s still contains metadata", aggregateWithRemovedKey.Name) + } +} + +func getHypervisor(t *testing.T, client *gophercloud.ServiceClient) (*hypervisors.Hypervisor, error) { + allPages, err := hypervisors.List(client).AllPages() + th.AssertNoErr(t, err) + + allHypervisors, err := hypervisors.ExtractHypervisors(allPages) + th.AssertNoErr(t, err) + + for _, h := range allHypervisors { + return &h, nil + } + + return nil, fmt.Errorf("Unable to get hypervisor") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/attachinterfaces_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/attachinterfaces_test.go new file mode 100644 index 000000000000..b8f968028544 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/attachinterfaces_test.go @@ -0,0 +1,51 @@ +// +build acceptance compute servers + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestAttachDetachInterface(t *testing.T) { + clients.RequireLong(t) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + iface, err := AttachInterface(t, client, server.ID) + th.AssertNoErr(t, err) + defer DetachInterface(t, client, server.ID, iface.PortID) + + tools.PrintResource(t, iface) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + var found bool + for _, networkAddresses := range server.Addresses[choices.NetworkName].([]interface{}) { + address := networkAddresses.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "fixed" { + fixedIP := address["addr"].(string) + + for _, v := range iface.FixedIPs { + if fixedIP == v.IPAddress { + found = true + } + } + } + } + + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/availabilityzones_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/availabilityzones_test.go new file mode 100644 index 000000000000..4d030c296848 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/availabilityzones_test.go @@ -0,0 +1,58 @@ +// +build acceptance compute availabilityzones + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + 
"github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestAvailabilityZonesList(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := availabilityzones.List(client).AllPages() + th.AssertNoErr(t, err) + + availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, zoneInfo := range availabilityZoneInfo { + tools.PrintResource(t, zoneInfo) + + if zoneInfo.ZoneName == "nova" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestAvailabilityZonesListDetail(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := availabilityzones.ListDetail(client).AllPages() + th.AssertNoErr(t, err) + + availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, zoneInfo := range availabilityZoneInfo { + tools.PrintResource(t, zoneInfo) + + if zoneInfo.ZoneName == "nova" { + found = true + } + } + + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go new file mode 100644 index 000000000000..ec5d72f75758 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/bootfromvolume_test.go @@ -0,0 +1,304 @@ +// +build acceptance compute bootfromvolume + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + blockstorage "github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestBootFromImage(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: choices.ImageID, + }, + } + + server, err := CreateBootableVolumeServer(t, client, blockDevices) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + tools.PrintResource(t, server) + + th.AssertEquals(t, server.Image["id"], choices.ImageID) +} + +func TestBootFromNewVolume(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceImage, + UUID: choices.ImageID, + VolumeSize: 2, + }, + } + + server, err := CreateBootableVolumeServer(t, client, blockDevices) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + attachPages, err := 
volumeattach.List(client, server.ID).AllPages() + th.AssertNoErr(t, err) + + attachments, err := volumeattach.ExtractVolumeAttachments(attachPages) + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + tools.PrintResource(t, attachments) + + if server.Image != nil { + t.Fatalf("server image should be nil") + } + + th.AssertEquals(t, len(attachments), 1) + + // TODO: volumes_attached extension +} + +func TestBootFromExistingVolume(t *testing.T) { + clients.RequireLong(t) + + computeClient, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + blockStorageClient, err := clients.NewBlockStorageV2Client() + th.AssertNoErr(t, err) + + volume, err := blockstorage.CreateVolumeFromImage(t, blockStorageClient) + th.AssertNoErr(t, err) + + tools.PrintResource(t, volume) + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceVolume, + UUID: volume.ID, + }, + } + + server, err := CreateBootableVolumeServer(t, computeClient, blockDevices) + th.AssertNoErr(t, err) + defer DeleteServer(t, computeClient, server) + + attachPages, err := volumeattach.List(computeClient, server.ID).AllPages() + th.AssertNoErr(t, err) + + attachments, err := volumeattach.ExtractVolumeAttachments(attachPages) + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + tools.PrintResource(t, attachments) + + if server.Image != nil { + t.Fatalf("server image should be nil") + } + + th.AssertEquals(t, len(attachments), 1) + th.AssertEquals(t, attachments[0].VolumeID, volume.ID) + // TODO: volumes_attached extension +} + +func TestBootFromMultiEphemeralServer(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + SourceType: bootfromvolume.SourceImage, + UUID: choices.ImageID, + VolumeSize: 5, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + } + + server, err := CreateMultiEphemeralServer(t, client, blockDevices) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + tools.PrintResource(t, server) +} + +func TestAttachNewVolume(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: choices.ImageID, + }, + bootfromvolume.BlockDevice{ + BootIndex: 1, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 2, + }, + } + + server, err := CreateBootableVolumeServer(t, client, blockDevices) + th.AssertNoErr(t, err) + defer 
DeleteServer(t, client, server) + + attachPages, err := volumeattach.List(client, server.ID).AllPages() + th.AssertNoErr(t, err) + + attachments, err := volumeattach.ExtractVolumeAttachments(attachPages) + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + tools.PrintResource(t, attachments) + + th.AssertEquals(t, server.Image["id"], choices.ImageID) + th.AssertEquals(t, len(attachments), 1) + + // TODO: volumes_attached extension +} + +func TestAttachExistingVolume(t *testing.T) { + clients.RequireLong(t) + + computeClient, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + blockStorageClient, err := clients.NewBlockStorageV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + volume, err := blockstorage.CreateVolume(t, blockStorageClient) + th.AssertNoErr(t, err) + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: choices.ImageID, + }, + bootfromvolume.BlockDevice{ + BootIndex: 1, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceVolume, + UUID: volume.ID, + }, + } + + server, err := CreateBootableVolumeServer(t, computeClient, blockDevices) + th.AssertNoErr(t, err) + defer DeleteServer(t, computeClient, server) + + attachPages, err := volumeattach.List(computeClient, server.ID).AllPages() + th.AssertNoErr(t, err) + + attachments, err := volumeattach.ExtractVolumeAttachments(attachPages) + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + tools.PrintResource(t, attachments) + + th.AssertEquals(t, server.Image["id"], choices.ImageID) + th.AssertEquals(t, len(attachments), 1) + th.AssertEquals(t, attachments[0].VolumeID, volume.ID) + + // TODO: volumes_attached extension +} + +func TestBootFromNewCustomizedVolume(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test that requires server creation in short mode.") + } + + client, err := clients.NewComputeV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceImage, + UUID: choices.ImageID, + VolumeSize: 2, + DeviceType: "disk", + DiskBus: "virtio", + }, + } + + server, err := CreateBootableVolumeServer(t, client, blockDevices) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + defer DeleteServer(t, client, server) + + tools.PrintResource(t, server) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/compute.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/compute.go new file mode 100644 index 000000000000..89bd08de4351 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/compute.go @@ -0,0 +1,1005 @@ +// Package v2 contains common functions for creating compute-based resources +// for use in acceptance tests. See the `*_test.go` files for example usages. 
+package v2 + +import ( + "crypto/rand" + "crypto/rsa" + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + dsr "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + "golang.org/x/crypto/ssh" +) + +// AssociateFloatingIP will associate a floating IP with an instance. An error +// will be returned if the floating IP was unable to be associated. +func AssociateFloatingIP(t *testing.T, client *gophercloud.ServiceClient, floatingIP *floatingips.FloatingIP, server *servers.Server) error { + associateOpts := floatingips.AssociateOpts{ + FloatingIP: floatingIP.IP, + } + + t.Logf("Attempting to associate floating IP %s to instance %s", floatingIP.IP, server.ID) + err := floatingips.AssociateInstance(client, server.ID, associateOpts).ExtractErr() + if err != nil { + return err + } + + return nil +} + +// AssociateFloatingIPWithFixedIP will associate a floating IP with an +// instance's specific fixed IP. An error will be returend if the floating IP +// was unable to be associated. +func AssociateFloatingIPWithFixedIP(t *testing.T, client *gophercloud.ServiceClient, floatingIP *floatingips.FloatingIP, server *servers.Server, fixedIP string) error { + associateOpts := floatingips.AssociateOpts{ + FloatingIP: floatingIP.IP, + FixedIP: fixedIP, + } + + t.Logf("Attempting to associate floating IP %s to fixed IP %s on instance %s", floatingIP.IP, fixedIP, server.ID) + err := floatingips.AssociateInstance(client, server.ID, associateOpts).ExtractErr() + if err != nil { + return err + } + + return nil +} + +// AttachInterface will create and attach an interface on a given server. +// An error will returned if the interface could not be created. 
+func AttachInterface(t *testing.T, client *gophercloud.ServiceClient, serverID string) (*attachinterfaces.Interface, error) { + t.Logf("Attempting to attach interface to server %s", serverID) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return nil, err + } + + createOpts := attachinterfaces.CreateOpts{ + NetworkID: networkID, + } + + iface, err := attachinterfaces.Create(client, serverID, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created interface %s on server %s", iface.PortID, serverID) + + return iface, nil +} + +// CreateAggregate will create an aggregate with random name and available zone. +// An error will be returned if the aggregate could not be created. +func CreateAggregate(t *testing.T, client *gophercloud.ServiceClient) (*aggregates.Aggregate, error) { + aggregateName := tools.RandomString("aggregate_", 5) + availabilityZone := tools.RandomString("zone_", 5) + t.Logf("Attempting to create aggregate %s", aggregateName) + + createOpts := aggregates.CreateOpts{ + Name: aggregateName, + AvailabilityZone: availabilityZone, + } + + aggregate, err := aggregates.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created aggregate %d", aggregate.ID) + + aggregate, err = aggregates.Get(client, aggregate.ID).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, aggregate.Name, aggregateName) + th.AssertEquals(t, aggregate.AvailabilityZone, availabilityZone) + + return aggregate, nil +} + +// CreateBootableVolumeServer works like CreateServer but is configured with +// one or more block devices defined by passing in []bootfromvolume.BlockDevice. +// An error will be returned if a server was unable to be created. +func CreateBootableVolumeServer(t *testing.T, client *gophercloud.ServiceClient, blockDevices []bootfromvolume.BlockDevice) (*servers.Server, error) { + var server *servers.Server + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return server, err + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create bootable volume server: %s", name) + + serverCreateOpts := servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + Networks: []servers.Network{ + servers.Network{UUID: networkID}, + }, + } + + if blockDevices[0].SourceType == bootfromvolume.SourceImage && blockDevices[0].DestinationType == bootfromvolume.DestinationLocal { + serverCreateOpts.ImageRef = blockDevices[0].UUID + } + + server, err = bootfromvolume.Create(client, bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + }).Extract() + + if err != nil { + return server, err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return server, err + } + + newServer, err := servers.Get(client, server.ID).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, newServer.Name, name) + th.AssertEquals(t, newServer.Flavor["id"], choices.FlavorID) + + return newServer, nil +} + +// CreateDefaultRule will create a default security group rule with a +// random port range between 80 and 90. An error will be returned if +// a default rule was unable to be created. 
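+//
+// A rough usage sketch, mirroring the defsecrules acceptance tests:
+//
+//	defaultRule, err := CreateDefaultRule(t, client)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteDefaultRule(t, client, defaultRule)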
+func CreateDefaultRule(t *testing.T, client *gophercloud.ServiceClient) (dsr.DefaultRule, error) { + createOpts := dsr.CreateOpts{ + FromPort: tools.RandomInt(80, 89), + ToPort: tools.RandomInt(90, 99), + IPProtocol: "TCP", + CIDR: "0.0.0.0/0", + } + + defaultRule, err := dsr.Create(client, createOpts).Extract() + if err != nil { + return *defaultRule, err + } + + t.Logf("Created default rule: %s", defaultRule.ID) + + return *defaultRule, nil +} + +// CreateFlavor will create a flavor with a random name. +// An error will be returned if the flavor could not be created. +func CreateFlavor(t *testing.T, client *gophercloud.ServiceClient) (*flavors.Flavor, error) { + flavorName := tools.RandomString("flavor_", 5) + t.Logf("Attempting to create flavor %s", flavorName) + + isPublic := true + createOpts := flavors.CreateOpts{ + Name: flavorName, + RAM: 1, + VCPUs: 1, + Disk: gophercloud.IntToPointer(1), + IsPublic: &isPublic, + } + + flavor, err := flavors.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created flavor %s", flavor.ID) + + th.AssertEquals(t, flavor.Name, flavorName) + th.AssertEquals(t, flavor.RAM, 1) + th.AssertEquals(t, flavor.Disk, 1) + th.AssertEquals(t, flavor.VCPUs, 1) + th.AssertEquals(t, flavor.IsPublic, true) + + return flavor, nil +} + +// CreateFloatingIP will allocate a floating IP. +// An error will be returend if one was unable to be allocated. +func CreateFloatingIP(t *testing.T, client *gophercloud.ServiceClient) (*floatingips.FloatingIP, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + createOpts := floatingips.CreateOpts{ + Pool: choices.FloatingIPPoolName, + } + floatingIP, err := floatingips.Create(client, createOpts).Extract() + if err != nil { + return floatingIP, err + } + + t.Logf("Created floating IP: %s", floatingIP.ID) + return floatingIP, nil +} + +func createKey() (string, error) { + privateKey, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return "", err + } + + publicKey := privateKey.PublicKey + pub, err := ssh.NewPublicKey(&publicKey) + if err != nil { + return "", err + } + + pubBytes := ssh.MarshalAuthorizedKey(pub) + pk := string(pubBytes) + return pk, nil +} + +// CreateKeyPair will create a KeyPair with a random name. An error will occur +// if the keypair failed to be created. An error will be returned if the +// keypair was unable to be created. +func CreateKeyPair(t *testing.T, client *gophercloud.ServiceClient) (*keypairs.KeyPair, error) { + keyPairName := tools.RandomString("keypair_", 5) + + t.Logf("Attempting to create keypair: %s", keyPairName) + createOpts := keypairs.CreateOpts{ + Name: keyPairName, + } + keyPair, err := keypairs.Create(client, createOpts).Extract() + if err != nil { + return keyPair, err + } + + t.Logf("Created keypair: %s", keyPairName) + + th.AssertEquals(t, keyPair.Name, keyPairName) + + return keyPair, nil +} + +// CreateMultiEphemeralServer works like CreateServer but is configured with +// one or more block devices defined by passing in []bootfromvolume.BlockDevice. +// These block devices act like block devices when booting from a volume but +// are actually local ephemeral disks. +// An error will be returned if a server was unable to be created. 
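+//
+// A rough call-site sketch; the blockDevices slice is assumed to be built by
+// the caller to describe the ephemeral disks:
+//
+//	server, err := CreateMultiEphemeralServer(t, client, blockDevices)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteServer(t, client, server)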
+func CreateMultiEphemeralServer(t *testing.T, client *gophercloud.ServiceClient, blockDevices []bootfromvolume.BlockDevice) (*servers.Server, error) { + var server *servers.Server + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return server, err + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create bootable volume server: %s", name) + + serverCreateOpts := servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + Networks: []servers.Network{ + servers.Network{UUID: networkID}, + }, + } + + server, err = bootfromvolume.Create(client, bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + }).Extract() + + if err != nil { + return server, err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return server, err + } + + newServer, err := servers.Get(client, server.ID).Extract() + + th.AssertEquals(t, newServer.Name, name) + th.AssertEquals(t, newServer.Flavor["id"], choices.FlavorID) + th.AssertEquals(t, newServer.Image["id"], choices.ImageID) + + return newServer, nil +} + +// CreatePrivateFlavor will create a private flavor with a random name. +// An error will be returned if the flavor could not be created. +func CreatePrivateFlavor(t *testing.T, client *gophercloud.ServiceClient) (*flavors.Flavor, error) { + flavorName := tools.RandomString("flavor_", 5) + t.Logf("Attempting to create flavor %s", flavorName) + + isPublic := false + createOpts := flavors.CreateOpts{ + Name: flavorName, + RAM: 1, + VCPUs: 1, + Disk: gophercloud.IntToPointer(1), + IsPublic: &isPublic, + } + + flavor, err := flavors.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created flavor %s", flavor.ID) + + th.AssertEquals(t, flavor.Name, flavorName) + th.AssertEquals(t, flavor.RAM, 1) + th.AssertEquals(t, flavor.Disk, 1) + th.AssertEquals(t, flavor.VCPUs, 1) + th.AssertEquals(t, flavor.IsPublic, false) + + return flavor, nil +} + +// CreateSecurityGroup will create a security group with a random name. +// An error will be returned if one was failed to be created. +func CreateSecurityGroup(t *testing.T, client *gophercloud.ServiceClient) (*secgroups.SecurityGroup, error) { + name := tools.RandomString("secgroup_", 5) + + createOpts := secgroups.CreateOpts{ + Name: name, + Description: "something", + } + + securityGroup, err := secgroups.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Created security group: %s", securityGroup.ID) + + th.AssertEquals(t, securityGroup.Name, name) + + return securityGroup, nil +} + +// CreateSecurityGroupRule will create a security group rule with a random name +// and a random TCP port range between port 80 and 99. An error will be +// returned if the rule failed to be created. 
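+//
+// A rough usage sketch, pairing the rule with a freshly created group:
+//
+//	securityGroup, err := CreateSecurityGroup(t, client)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteSecurityGroup(t, client, securityGroup.ID)
+//
+//	rule, err := CreateSecurityGroupRule(t, client, securityGroup.ID)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteSecurityGroupRule(t, client, rule.ID)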
+func CreateSecurityGroupRule(t *testing.T, client *gophercloud.ServiceClient, securityGroupID string) (*secgroups.Rule, error) { + fromPort := tools.RandomInt(80, 89) + toPort := tools.RandomInt(90, 99) + createOpts := secgroups.CreateRuleOpts{ + ParentGroupID: securityGroupID, + FromPort: fromPort, + ToPort: toPort, + IPProtocol: "TCP", + CIDR: "0.0.0.0/0", + } + + rule, err := secgroups.CreateRule(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Created security group rule: %s", rule.ID) + + th.AssertEquals(t, rule.FromPort, fromPort) + th.AssertEquals(t, rule.ToPort, toPort) + th.AssertEquals(t, rule.ParentGroupID, securityGroupID) + + return rule, nil +} + +// CreateServer creates a basic instance with a randomly generated name. +// The flavor of the instance will be the value of the OS_FLAVOR_ID environment variable. +// The image will be the value of the OS_IMAGE_ID environment variable. +// The instance will be launched on the network specified in OS_NETWORK_NAME. +// An error will be returned if the instance was unable to be created. +func CreateServer(t *testing.T, client *gophercloud.ServiceClient) (*servers.Server, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return nil, err + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s", name) + + pwd := tools.MakeNewPassword("") + + server, err := servers.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + Networks: []servers.Network{ + servers.Network{UUID: networkID}, + }, + Metadata: map[string]string{ + "abc": "def", + }, + Personality: servers.Personality{ + &servers.File{ + Path: "/etc/test", + Contents: []byte("hello world"), + }, + }, + }).Extract() + if err != nil { + return server, err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return nil, err + } + + newServer, err := servers.Get(client, server.ID).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, newServer.Name, name) + th.AssertEquals(t, newServer.Flavor["id"], choices.FlavorID) + th.AssertEquals(t, newServer.Image["id"], choices.ImageID) + + return newServer, nil +} + +// CreateServerWithoutImageRef creates a basic instance with a randomly generated name. +// The flavor of the instance will be the value of the OS_FLAVOR_ID environment variable. +// The image is intentionally missing to trigger an error. +// The instance will be launched on the network specified in OS_NETWORK_NAME. +// An error will be returned if the instance was unable to be created. 
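+//
+// A rough usage sketch; since the image reference is deliberately omitted,
+// the interesting assertion is usually on the returned error:
+//
+//	_, err := CreateServerWithoutImageRef(t, client)
+//	if err == nil {
+//		t.Fatal("expected an error when creating a server without an image ref")
+//	}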
+func CreateServerWithoutImageRef(t *testing.T, client *gophercloud.ServiceClient) (*servers.Server, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return nil, err + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s", name) + + pwd := tools.MakeNewPassword("") + + server, err := servers.Create(client, servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + AdminPass: pwd, + Networks: []servers.Network{ + servers.Network{UUID: networkID}, + }, + Personality: servers.Personality{ + &servers.File{ + Path: "/etc/test", + Contents: []byte("hello world"), + }, + }, + }).Extract() + if err != nil { + return nil, err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return nil, err + } + + return server, nil +} + +// CreateServerGroup will create a server with a random name. An error will be +// returned if the server group failed to be created. +func CreateServerGroup(t *testing.T, client *gophercloud.ServiceClient, policy string) (*servergroups.ServerGroup, error) { + name := tools.RandomString("ACPTTEST", 16) + + t.Logf("Attempting to create server group %s", name) + + sg, err := servergroups.Create(client, &servergroups.CreateOpts{ + Name: name, + Policies: []string{policy}, + }).Extract() + + if err != nil { + return nil, err + } + + t.Logf("Successfully created server group %s", name) + + th.AssertEquals(t, sg.Name, name) + + return sg, nil +} + +// CreateServerInServerGroup works like CreateServer but places the instance in +// a specified Server Group. +func CreateServerInServerGroup(t *testing.T, client *gophercloud.ServiceClient, serverGroup *servergroups.ServerGroup) (*servers.Server, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return nil, err + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s", name) + + pwd := tools.MakeNewPassword("") + + serverCreateOpts := servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + AdminPass: pwd, + Networks: []servers.Network{ + servers.Network{UUID: networkID}, + }, + } + + schedulerHintsOpts := schedulerhints.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + SchedulerHints: schedulerhints.SchedulerHints{ + Group: serverGroup.ID, + }, + } + server, err := servers.Create(client, schedulerHintsOpts).Extract() + if err != nil { + return nil, err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return nil, err + } + + newServer, err := servers.Get(client, server.ID).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, newServer.Name, name) + th.AssertEquals(t, newServer.Flavor["id"], choices.FlavorID) + th.AssertEquals(t, newServer.Image["id"], choices.ImageID) + + return newServer, nil +} + +// CreateServerWithPublicKey works the same as CreateServer, but additionally +// configures the server with a specified Key Pair name. 
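+//
+// A rough usage sketch, mirroring the keypairs acceptance tests (publicKey is
+// assumed to be a valid SSH public key string):
+//
+//	keyPair, err := ImportPublicKey(t, client, publicKey)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteKeyPair(t, client, keyPair)
+//
+//	server, err := CreateServerWithPublicKey(t, client, keyPair.Name)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteServer(t, client, server)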
+func CreateServerWithPublicKey(t *testing.T, client *gophercloud.ServiceClient, keyPairName string) (*servers.Server, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + if err != nil { + return nil, err + } + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create server: %s", name) + + serverCreateOpts := servers.CreateOpts{ + Name: name, + FlavorRef: choices.FlavorID, + ImageRef: choices.ImageID, + Networks: []servers.Network{ + servers.Network{UUID: networkID}, + }, + } + + server, err := servers.Create(client, keypairs.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + KeyName: keyPairName, + }).Extract() + if err != nil { + return nil, err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return nil, err + } + + newServer, err := servers.Get(client, server.ID).Extract() + if err != nil { + return nil, err + } + + th.AssertEquals(t, newServer.Name, name) + th.AssertEquals(t, newServer.Flavor["id"], choices.FlavorID) + th.AssertEquals(t, newServer.Image["id"], choices.ImageID) + + return newServer, nil +} + +// CreateVolumeAttachment will attach a volume to a server. An error will be +// returned if the volume failed to attach. +func CreateVolumeAttachment(t *testing.T, client *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, server *servers.Server, volume *volumes.Volume) (*volumeattach.VolumeAttachment, error) { + volumeAttachOptions := volumeattach.CreateOpts{ + VolumeID: volume.ID, + } + + t.Logf("Attempting to attach volume %s to server %s", volume.ID, server.ID) + volumeAttachment, err := volumeattach.Create(client, server.ID, volumeAttachOptions).Extract() + if err != nil { + return volumeAttachment, err + } + + if err := volumes.WaitForStatus(blockClient, volume.ID, "in-use", 60); err != nil { + return volumeAttachment, err + } + + return volumeAttachment, nil +} + +// DeleteAggregate will delete a given host aggregate. A fatal error will occur if +// the aggregate deleting is failed. This works best when using it as a +// deferred function. +func DeleteAggregate(t *testing.T, client *gophercloud.ServiceClient, aggregate *aggregates.Aggregate) { + err := aggregates.Delete(client, aggregate.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete aggregate %d", aggregate.ID) + } + + t.Logf("Deleted aggregate: %d", aggregate.ID) +} + +// DeleteDefaultRule deletes a default security group rule. +// A fatal error will occur if the rule failed to delete. This works best when +// using it as a deferred function. +func DeleteDefaultRule(t *testing.T, client *gophercloud.ServiceClient, defaultRule dsr.DefaultRule) { + err := dsr.Delete(client, defaultRule.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete default rule %s: %v", defaultRule.ID, err) + } + + t.Logf("Deleted default rule: %s", defaultRule.ID) +} + +// DeleteFlavor will delete a flavor. A fatal error will occur if the flavor +// could not be deleted. This works best when using it as a deferred function. +func DeleteFlavor(t *testing.T, client *gophercloud.ServiceClient, flavor *flavors.Flavor) { + err := flavors.Delete(client, flavor.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete flavor %s", flavor.ID) + } + + t.Logf("Deleted flavor: %s", flavor.ID) +} + +// DeleteFloatingIP will de-allocate a floating IP. A fatal error will occur if +// the floating IP failed to de-allocate. 
This works best when using it as a +// deferred function. +func DeleteFloatingIP(t *testing.T, client *gophercloud.ServiceClient, floatingIP *floatingips.FloatingIP) { + err := floatingips.Delete(client, floatingIP.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete floating IP %s: %v", floatingIP.ID, err) + } + + t.Logf("Deleted floating IP: %s", floatingIP.ID) +} + +// DeleteKeyPair will delete a specified keypair. A fatal error will occur if +// the keypair failed to be deleted. This works best when used as a deferred +// function. +func DeleteKeyPair(t *testing.T, client *gophercloud.ServiceClient, keyPair *keypairs.KeyPair) { + err := keypairs.Delete(client, keyPair.Name).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete keypair %s: %v", keyPair.Name, err) + } + + t.Logf("Deleted keypair: %s", keyPair.Name) +} + +// DeleteSecurityGroup will delete a security group. A fatal error will occur +// if the group failed to be deleted. This works best as a deferred function. +func DeleteSecurityGroup(t *testing.T, client *gophercloud.ServiceClient, securityGroupID string) { + err := secgroups.Delete(client, securityGroupID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete security group %s: %s", securityGroupID, err) + } + + t.Logf("Deleted security group: %s", securityGroupID) +} + +// DeleteSecurityGroupRule will delete a security group rule. A fatal error +// will occur if the rule failed to be deleted. This works best when used +// as a deferred function. +func DeleteSecurityGroupRule(t *testing.T, client *gophercloud.ServiceClient, ruleID string) { + err := secgroups.DeleteRule(client, ruleID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete rule: %v", err) + } + + t.Logf("Deleted security group rule: %s", ruleID) +} + +// DeleteServer deletes an instance via its UUID. +// A fatal error will occur if the instance failed to be destroyed. This works +// best when using it as a deferred function. +func DeleteServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server) { + err := servers.Delete(client, server.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete server %s: %s", server.ID, err) + } + + if err := WaitForComputeStatus(client, server, "DELETED"); err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + t.Logf("Deleted server: %s", server.ID) + return + } + t.Fatalf("Error deleting server %s: %s", server.ID, err) + } + + // If we reach this point, the API returned an actual DELETED status + // which is a very short window of time, but happens occasionally. + t.Logf("Deleted server: %s", server.ID) +} + +// DeleteServerGroup will delete a server group. A fatal error will occur if +// the server group failed to be deleted. This works best when used as a +// deferred function. +func DeleteServerGroup(t *testing.T, client *gophercloud.ServiceClient, serverGroup *servergroups.ServerGroup) { + err := servergroups.Delete(client, serverGroup.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete server group %s: %v", serverGroup.ID, err) + } + + t.Logf("Deleted server group %s", serverGroup.ID) +} + +// DeleteVolumeAttachment will disconnect a volume from an instance. A fatal +// error will occur if the volume failed to detach. This works best when used +// as a deferred function. 
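+//
+// A rough usage sketch, paired with CreateVolumeAttachment (the volume is
+// assumed to come from the block storage acceptance helpers):
+//
+//	volumeAttachment, err := CreateVolumeAttachment(t, client, blockClient, server, volume)
+//	if err != nil {
+//		t.Fatal(err)
+//	}
+//	defer DeleteVolumeAttachment(t, client, blockClient, server, volumeAttachment)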
+func DeleteVolumeAttachment(t *testing.T, client *gophercloud.ServiceClient, blockClient *gophercloud.ServiceClient, server *servers.Server, volumeAttachment *volumeattach.VolumeAttachment) { + + err := volumeattach.Delete(client, server.ID, volumeAttachment.VolumeID).ExtractErr() + if err != nil { + t.Fatalf("Unable to detach volume: %v", err) + } + + if err := volumes.WaitForStatus(blockClient, volumeAttachment.ID, "available", 60); err != nil { + t.Fatalf("Unable to wait for volume: %v", err) + } + t.Logf("Deleted volume: %s", volumeAttachment.VolumeID) +} + +// DetachInterface will detach an interface from a server. A fatal +// error will occur if the interface could not be detached. This works best +// when used as a deferred function. +func DetachInterface(t *testing.T, client *gophercloud.ServiceClient, serverID, portID string) { + t.Logf("Attempting to detach interface %s from server %s", portID, serverID) + + err := attachinterfaces.Delete(client, serverID, portID).ExtractErr() + if err != nil { + t.Fatalf("Unable to detach interface %s from server %s", portID, serverID) + } + + t.Logf("Detached interface %s from server %s", portID, serverID) +} + +// DisassociateFloatingIP will disassociate a floating IP from an instance. A +// fatal error will occur if the floating IP failed to disassociate. This works +// best when using it as a deferred function. +func DisassociateFloatingIP(t *testing.T, client *gophercloud.ServiceClient, floatingIP *floatingips.FloatingIP, server *servers.Server) { + disassociateOpts := floatingips.DisassociateOpts{ + FloatingIP: floatingIP.IP, + } + + err := floatingips.DisassociateInstance(client, server.ID, disassociateOpts).ExtractErr() + if err != nil { + t.Fatalf("Unable to disassociate floating IP %s from server %s: %v", floatingIP.IP, server.ID, err) + } + + t.Logf("Disassociated floating IP %s from server %s", floatingIP.IP, server.ID) +} + +// GetNetworkIDFromNetworks will return the network ID from a specified network +// UUID using the os-networks API extension. An error will be returned if the +// network could not be retrieved. +func GetNetworkIDFromNetworks(t *testing.T, client *gophercloud.ServiceClient, networkName string) (string, error) { + allPages, err := networks.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkList, err := networks.ExtractNetworks(allPages) + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + networkID := "" + for _, network := range networkList { + t.Logf("Network: %v", network) + if network.Label == networkName { + networkID = network.ID + } + } + + t.Logf("Found network ID for %s: %s", networkName, networkID) + + return networkID, nil +} + +// GetNetworkIDFromTenantNetworks will return the network UUID for a given +// network name using the os-tenant-networks API extension. An error will be +// returned if the network could not be retrieved. 
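+//
+// A rough usage sketch (choices is assumed to come from
+// clients.AcceptanceTestChoicesFromEnv):
+//
+//	networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName)
+//	if err != nil {
+//		t.Fatal(err)
+//	}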
+func GetNetworkIDFromTenantNetworks(t *testing.T, client *gophercloud.ServiceClient, networkName string) (string, error) { + allPages, err := tenantnetworks.List(client).AllPages() + if err != nil { + return "", err + } + + allTenantNetworks, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil { + return "", err + } + + for _, network := range allTenantNetworks { + if network.Name == networkName { + return network.ID, nil + } + } + + return "", fmt.Errorf("Failed to obtain network ID for network %s", networkName) +} + +// ImportPublicKey will create a KeyPair with a random name and a specified +// public key. An error will be returned if the keypair failed to be created. +func ImportPublicKey(t *testing.T, client *gophercloud.ServiceClient, publicKey string) (*keypairs.KeyPair, error) { + keyPairName := tools.RandomString("keypair_", 5) + + t.Logf("Attempting to create keypair: %s", keyPairName) + createOpts := keypairs.CreateOpts{ + Name: keyPairName, + PublicKey: publicKey, + } + keyPair, err := keypairs.Create(client, createOpts).Extract() + if err != nil { + return keyPair, err + } + + t.Logf("Created keypair: %s", keyPairName) + + th.AssertEquals(t, keyPair.Name, keyPairName) + th.AssertEquals(t, keyPair.PublicKey, publicKey) + + return keyPair, nil +} + +// ResizeServer performs a resize action on an instance. An error will be +// returned if the instance failed to resize. +// The new flavor that the instance will be resized to is specified in OS_FLAVOR_ID_RESIZE. +func ResizeServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server) error { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatal(err) + } + + opts := &servers.ResizeOpts{ + FlavorRef: choices.FlavorIDResize, + } + if res := servers.Resize(client, server.ID, opts); res.Err != nil { + return res.Err + } + + if err := WaitForComputeStatus(client, server, "VERIFY_RESIZE"); err != nil { + return err + } + + return nil +} + +// WaitForComputeStatus will poll an instance's status until it either matches +// the specified status or the status becomes ERROR. +func WaitForComputeStatus(client *gophercloud.ServiceClient, server *servers.Server, status string) error { + return tools.WaitFor(func() (bool, error) { + latest, err := servers.Get(client, server.ID).Extract() + if err != nil { + return false, err + } + + if latest.Status == status { + // Success! + return true, nil + } + + if latest.Status == "ERROR" { + return false, fmt.Errorf("Instance in ERROR state") + } + + return false, nil + }) +} + +//Convenience method to fill an QuotaSet-UpdateOpts-struct from a QuotaSet-struct +func FillUpdateOptsFromQuotaSet(src quotasets.QuotaSet, dest *quotasets.UpdateOpts) { + dest.FixedIPs = &src.FixedIPs + dest.FloatingIPs = &src.FloatingIPs + dest.InjectedFileContentBytes = &src.InjectedFileContentBytes + dest.InjectedFilePathBytes = &src.InjectedFilePathBytes + dest.InjectedFiles = &src.InjectedFiles + dest.KeyPairs = &src.KeyPairs + dest.RAM = &src.RAM + dest.SecurityGroupRules = &src.SecurityGroupRules + dest.SecurityGroups = &src.SecurityGroups + dest.Cores = &src.Cores + dest.Instances = &src.Instances + dest.ServerGroups = &src.ServerGroups + dest.ServerGroupMembers = &src.ServerGroupMembers + dest.MetadataItems = &src.MetadataItems +} + +// RescueServer will place the specified server into rescue mode. 
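+//
+// A rough usage sketch, mirroring the rescue/unrescue acceptance test:
+//
+//	if err := RescueServer(t, client, server); err != nil {
+//		t.Fatal(err)
+//	}
+//	if err := UnrescueServer(t, client, server); err != nil {
+//		t.Fatal(err)
+//	}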
+func RescueServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server) error { + t.Logf("Attempting to put server %s into rescue mode", server.ID) + _, err := rescueunrescue.Rescue(client, server.ID, rescueunrescue.RescueOpts{}).Extract() + if err != nil { + return err + } + + if err := WaitForComputeStatus(client, server, "RESCUE"); err != nil { + return err + } + + return nil +} + +// UnrescueServer will return server from rescue mode. +func UnrescueServer(t *testing.T, client *gophercloud.ServiceClient, server *servers.Server) error { + t.Logf("Attempting to return server %s from rescue mode", server.ID) + if err := rescueunrescue.Unrescue(client, server.ID).ExtractErr(); err != nil { + return err + } + + if err := WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/defsecrules_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/defsecrules_test.go new file mode 100644 index 000000000000..e97a3787188b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/defsecrules_test.go @@ -0,0 +1,61 @@ +// +build acceptance compute defsecrules + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + dsr "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestDefSecRulesList(t *testing.T) { + clients.RequireAdmin(t) + clients.RequireNovaNetwork(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := dsr.List(client).AllPages() + th.AssertNoErr(t, err) + + allDefaultRules, err := dsr.ExtractDefaultRules(allPages) + th.AssertNoErr(t, err) + + for _, defaultRule := range allDefaultRules { + tools.PrintResource(t, defaultRule) + } +} + +func TestDefSecRulesCreate(t *testing.T) { + clients.RequireAdmin(t) + clients.RequireNovaNetwork(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + defaultRule, err := CreateDefaultRule(t, client) + th.AssertNoErr(t, err) + defer DeleteDefaultRule(t, client, defaultRule) + + tools.PrintResource(t, defaultRule) +} + +func TestDefSecRulesGet(t *testing.T) { + clients.RequireAdmin(t) + clients.RequireNovaNetwork(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + defaultRule, err := CreateDefaultRule(t, client) + th.AssertNoErr(t, err) + defer DeleteDefaultRule(t, client, defaultRule) + + newDefaultRule, err := dsr.Get(client, defaultRule.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newDefaultRule) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/extension_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/extension_test.go new file mode 100644 index 000000000000..f76cc52e06a7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/extension_test.go @@ -0,0 +1,46 @@ +// +build acceptance compute extensions + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/common/extensions" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestExtensionsList(t *testing.T) { + client, err := 
clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := extensions.List(client).AllPages() + th.AssertNoErr(t, err) + + allExtensions, err := extensions.ExtractExtensions(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, extension := range allExtensions { + tools.PrintResource(t, extension) + + if extension.Name == "SchedulerHints" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestExtensionsGet(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + extension, err := extensions.Get(client, "os-admin-actions").Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, extension) + + th.AssertEquals(t, extension.Name, "AdminActions") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/flavors_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/flavors_test.go new file mode 100644 index 000000000000..d4aa341a7439 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/flavors_test.go @@ -0,0 +1,202 @@ +// +build acceptance compute flavors + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + th "github.com/gophercloud/gophercloud/testhelper" + + identity "github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3" +) + +func TestFlavorsList(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + allPages, err := flavors.ListDetail(client, nil).AllPages() + th.AssertNoErr(t, err) + + allFlavors, err := flavors.ExtractFlavors(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, flavor := range allFlavors { + tools.PrintResource(t, flavor) + + if flavor.ID == choices.FlavorID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestFlavorsAccessTypeList(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + flavorAccessTypes := map[string]flavors.AccessType{ + "public": flavors.PublicAccess, + "private": flavors.PrivateAccess, + "all": flavors.AllAccess, + } + + for flavorTypeName, flavorAccessType := range flavorAccessTypes { + t.Logf("** %s flavors: **", flavorTypeName) + allPages, err := flavors.ListDetail(client, flavors.ListOpts{AccessType: flavorAccessType}).AllPages() + th.AssertNoErr(t, err) + + allFlavors, err := flavors.ExtractFlavors(allPages) + th.AssertNoErr(t, err) + + for _, flavor := range allFlavors { + tools.PrintResource(t, flavor) + } + } +} + +func TestFlavorsGet(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + flavor, err := flavors.Get(client, choices.FlavorID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, flavor) + + th.AssertEquals(t, flavor.ID, choices.FlavorID) +} + +func TestFlavorsCreateDelete(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + flavor, err := CreateFlavor(t, client) + th.AssertNoErr(t, err) + defer DeleteFlavor(t, client, flavor) + + tools.PrintResource(t, flavor) +} + +func TestFlavorsAccessesList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + 
th.AssertNoErr(t, err) + + flavor, err := CreatePrivateFlavor(t, client) + th.AssertNoErr(t, err) + defer DeleteFlavor(t, client, flavor) + + allPages, err := flavors.ListAccesses(client, flavor.ID).AllPages() + th.AssertNoErr(t, err) + + allAccesses, err := flavors.ExtractAccesses(allPages) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(allAccesses), 0) +} + +func TestFlavorsAccessCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + identityClient, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := identity.CreateProject(t, identityClient, nil) + th.AssertNoErr(t, err) + defer identity.DeleteProject(t, identityClient, project.ID) + + flavor, err := CreatePrivateFlavor(t, client) + th.AssertNoErr(t, err) + defer DeleteFlavor(t, client, flavor) + + addAccessOpts := flavors.AddAccessOpts{ + Tenant: project.ID, + } + + accessList, err := flavors.AddAccess(client, flavor.ID, addAccessOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(accessList), 1) + th.AssertEquals(t, accessList[0].TenantID, project.ID) + th.AssertEquals(t, accessList[0].FlavorID, flavor.ID) + + for _, access := range accessList { + tools.PrintResource(t, access) + } + + removeAccessOpts := flavors.RemoveAccessOpts{ + Tenant: project.ID, + } + + accessList, err = flavors.RemoveAccess(client, flavor.ID, removeAccessOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(accessList), 0) +} + +func TestFlavorsExtraSpecsCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + flavor, err := CreatePrivateFlavor(t, client) + th.AssertNoErr(t, err) + defer DeleteFlavor(t, client, flavor) + + createOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY", + } + createdExtraSpecs, err := flavors.CreateExtraSpecs(client, flavor.ID, createOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, createdExtraSpecs) + + th.AssertEquals(t, len(createdExtraSpecs), 2) + th.AssertEquals(t, createdExtraSpecs["hw:cpu_policy"], "CPU-POLICY") + th.AssertEquals(t, createdExtraSpecs["hw:cpu_thread_policy"], "CPU-THREAD-POLICY") + + err = flavors.DeleteExtraSpec(client, flavor.ID, "hw:cpu_policy").ExtractErr() + th.AssertNoErr(t, err) + + updateOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_thread_policy": "CPU-THREAD-POLICY-BETTER", + } + updatedExtraSpec, err := flavors.UpdateExtraSpec(client, flavor.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, updatedExtraSpec) + + allExtraSpecs, err := flavors.ListExtraSpecs(client, flavor.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, allExtraSpecs) + + th.AssertEquals(t, len(allExtraSpecs), 1) + th.AssertEquals(t, allExtraSpecs["hw:cpu_thread_policy"], "CPU-THREAD-POLICY-BETTER") + + spec, err := flavors.GetExtraSpec(client, flavor.ID, "hw:cpu_thread_policy").Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, spec) + + th.AssertEquals(t, spec["hw:cpu_thread_policy"], "CPU-THREAD-POLICY-BETTER") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go new file mode 100644 index 000000000000..8130873676a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/floatingip_test.go @@ -0,0 +1,123 @@ +// +build 
acceptance compute servers + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestFloatingIPsCreateDelete(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + floatingIP, err := CreateFloatingIP(t, client) + th.AssertNoErr(t, err) + defer DeleteFloatingIP(t, client, floatingIP) + + tools.PrintResource(t, floatingIP) + + allPages, err := floatingips.List(client).AllPages() + th.AssertNoErr(t, err) + + allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, fip := range allFloatingIPs { + tools.PrintResource(t, floatingIP) + + if fip.ID == floatingIP.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + fip, err := floatingips.Get(client, floatingIP.ID).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, floatingIP.ID, fip.ID) +} + +func TestFloatingIPsAssociate(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + floatingIP, err := CreateFloatingIP(t, client) + th.AssertNoErr(t, err) + defer DeleteFloatingIP(t, client, floatingIP) + + tools.PrintResource(t, floatingIP) + + err = AssociateFloatingIP(t, client, floatingIP, server) + th.AssertNoErr(t, err) + defer DisassociateFloatingIP(t, client, floatingIP, server) + + newFloatingIP, err := floatingips.Get(client, floatingIP.ID).Extract() + th.AssertNoErr(t, err) + + t.Logf("Floating IP %s is associated with Fixed IP %s", floatingIP.IP, newFloatingIP.FixedIP) + + tools.PrintResource(t, newFloatingIP) + + th.AssertEquals(t, newFloatingIP.InstanceID, server.ID) +} + +func TestFloatingIPsFixedIPAssociate(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + newServer, err := servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + floatingIP, err := CreateFloatingIP(t, client) + th.AssertNoErr(t, err) + defer DeleteFloatingIP(t, client, floatingIP) + + tools.PrintResource(t, floatingIP) + + var fixedIP string + for _, networkAddresses := range newServer.Addresses[choices.NetworkName].([]interface{}) { + address := networkAddresses.(map[string]interface{}) + if address["OS-EXT-IPS:type"] == "fixed" { + if address["version"].(float64) == 4 { + fixedIP = address["addr"].(string) + } + } + } + + err = AssociateFloatingIPWithFixedIP(t, client, floatingIP, newServer, fixedIP) + th.AssertNoErr(t, err) + defer DisassociateFloatingIP(t, client, floatingIP, newServer) + + newFloatingIP, err := floatingips.Get(client, floatingIP.ID).Extract() + th.AssertNoErr(t, err) + + t.Logf("Floating IP %s is associated with Fixed IP %s", floatingIP.IP, newFloatingIP.FixedIP) + + tools.PrintResource(t, newFloatingIP) + + th.AssertEquals(t, newFloatingIP.InstanceID, server.ID) + th.AssertEquals(t, newFloatingIP.FixedIP, fixedIP) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/hypervisors_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/hypervisors_test.go new file mode 100644 index 000000000000..29d49a277e06 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/hypervisors_test.go @@ -0,0 +1,95 @@ +// +build acceptance compute hypervisors + +package v2 + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestHypervisorsList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := hypervisors.List(client).AllPages() + th.AssertNoErr(t, err) + + allHypervisors, err := hypervisors.ExtractHypervisors(allPages) + th.AssertNoErr(t, err) + + for _, h := range allHypervisors { + tools.PrintResource(t, h) + } +} + +func TestHypervisorsGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + hypervisorID, err := getHypervisorID(t, client) + th.AssertNoErr(t, err) + + hypervisor, err := hypervisors.Get(client, hypervisorID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, hypervisor) + + th.AssertEquals(t, hypervisorID, hypervisor.ID) +} + +func TestHypervisorsGetStatistics(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + hypervisorsStats, err := hypervisors.GetStatistics(client).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, hypervisorsStats) + + if hypervisorsStats.Count == 0 { + t.Fatalf("Unable to get hypervisor stats") + } +} + +func TestHypervisorsGetUptime(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + hypervisorID, err := getHypervisorID(t, client) + th.AssertNoErr(t, err) + + hypervisor, err := hypervisors.GetUptime(client, hypervisorID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, hypervisor) + + th.AssertEquals(t, hypervisorID, hypervisor.ID) +} + +func getHypervisorID(t *testing.T, client *gophercloud.ServiceClient) (int, error) { + allPages, err := hypervisors.List(client).AllPages() + th.AssertNoErr(t, err) + + allHypervisors, err := hypervisors.ExtractHypervisors(allPages) + th.AssertNoErr(t, err) + + if len(allHypervisors) > 0 { + return allHypervisors[0].ID, nil + } + + return 0, fmt.Errorf("Unable to get hypervisor ID") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/images_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/images_test.go new file mode 100644 index 000000000000..d7fe19b35ba5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/images_test.go @@ -0,0 +1,52 @@ +// +build acceptance compute images + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestImagesList(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) 
+ + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + allPages, err := images.ListDetail(client, nil).AllPages() + th.AssertNoErr(t, err) + + allImages, err := images.ExtractImages(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, image := range allImages { + tools.PrintResource(t, image) + + if image.ID == choices.ImageID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestImagesGet(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + image, err := images.Get(client, choices.ImageID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, image) + + th.AssertEquals(t, choices.ImageID, image.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go new file mode 100644 index 000000000000..4bb0ed97d745 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/keypairs_test.go @@ -0,0 +1,102 @@ +// +build acceptance compute keypairs + +package v2 + +import ( + "testing" + + "golang.org/x/crypto/ssh" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +const keyName = "gophercloud_test_key_pair" + +func TestKeypairsParse(t *testing.T) { + clients.SkipRelease(t, "stable/mitaka") + clients.SkipRelease(t, "stable/newton") + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + keyPair, err := CreateKeyPair(t, client) + th.AssertNoErr(t, err) + defer DeleteKeyPair(t, client, keyPair) + + // There was a series of OpenStack releases, between Liberty and Ocata, + // where the returned SSH key was not parsable by Go. + // This checks if the issue is happening again. 
+ _, err = ssh.ParsePrivateKey([]byte(keyPair.PrivateKey)) + th.AssertNoErr(t, err) + + tools.PrintResource(t, keyPair) +} + +func TestKeypairsCreateDelete(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + keyPair, err := CreateKeyPair(t, client) + th.AssertNoErr(t, err) + defer DeleteKeyPair(t, client, keyPair) + + tools.PrintResource(t, keyPair) + + allPages, err := keypairs.List(client).AllPages() + th.AssertNoErr(t, err) + + allKeys, err := keypairs.ExtractKeyPairs(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, kp := range allKeys { + tools.PrintResource(t, kp) + + if kp.Name == keyPair.Name { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestKeypairsImportPublicKey(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + publicKey, err := createKey() + th.AssertNoErr(t, err) + + keyPair, err := ImportPublicKey(t, client, publicKey) + th.AssertNoErr(t, err) + defer DeleteKeyPair(t, client, keyPair) + + tools.PrintResource(t, keyPair) +} + +func TestKeypairsServerCreateWithKey(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + publicKey, err := createKey() + th.AssertNoErr(t, err) + + keyPair, err := ImportPublicKey(t, client, publicKey) + th.AssertNoErr(t, err) + defer DeleteKeyPair(t, client, keyPair) + + server, err := CreateServerWithPublicKey(t, client, keyPair.Name) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, server.KeyName, keyPair.Name) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/limits_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/limits_test.go new file mode 100644 index 000000000000..8133999c6cfd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/limits_test.go @@ -0,0 +1,50 @@ +// +build acceptance compute limits + +package v2 + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestLimits(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + limits, err := limits.Get(client, nil).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, limits) + + th.AssertEquals(t, limits.Absolute.MaxPersonalitySize, 10240) +} + +func TestLimitsForTenant(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + // I think this is the easiest way to get the tenant ID while being + // agnostic to Identity v2 and v3. + // Technically we're just returning the limits for ourselves, but it's + // the fact that we're specifying a tenant ID that is important here. 
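+	// For a typical compute endpoint of the form
+	// http://example.com:8774/v2.1/<tenant-id>/, splitting on "/" leaves the
+	// tenant ID at index 4 (an assumption about the endpoint layout).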
+ endpointParts := strings.Split(client.Endpoint, "/") + tenantID := endpointParts[4] + + getOpts := limits.GetOpts{ + TenantID: tenantID, + } + + limits, err := limits.Get(client, getOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, limits) + + th.AssertEquals(t, limits.Absolute.MaxPersonalitySize, 10240) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/migrate_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/migrate_test.go new file mode 100644 index 000000000000..0661d12dcec5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/migrate_test.go @@ -0,0 +1,56 @@ +// +build acceptance compute servers + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestMigrate(t *testing.T) { + t.Skip("This is not passing in OpenLab. Works locally") + + clients.RequireLong(t) + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to migrate server %s", server.ID) + + err = migrate.Migrate(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestLiveMigrate(t *testing.T) { + clients.RequireLong(t) + clients.RequireAdmin(t) + clients.RequireLiveMigration(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to migrate server %s", server.ID) + + blockMigration := false + diskOverCommit := false + + liveMigrateOpts := migrate.LiveMigrateOpts{ + BlockMigration: &blockMigration, + DiskOverCommit: &diskOverCommit, + } + + err = migrate.LiveMigrate(client, server.ID, liveMigrateOpts).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/network_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/network_test.go new file mode 100644 index 000000000000..25fbe4144ce0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/network_test.go @@ -0,0 +1,55 @@ +// +build acceptance compute servers + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestNetworksList(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + allPages, err := networks.List(client).AllPages() + th.AssertNoErr(t, err) + + allNetworks, err := networks.ExtractNetworks(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, network := range allNetworks { + tools.PrintResource(t, network) + + if network.Label == choices.NetworkName { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestNetworksGet(t *testing.T) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + 
+ networkID, err := GetNetworkIDFromNetworks(t, client, choices.NetworkName) + th.AssertNoErr(t, err) + + network, err := networks.Get(client, networkID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, network) + + th.AssertEquals(t, network.Label, choices.NetworkName) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/pkg.go new file mode 100644 index 000000000000..a57c1e7bfa75 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/pkg.go @@ -0,0 +1,2 @@ +// Package v2 package contains acceptance tests for the Openstack Compute V2 service. +package v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/quotaset_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/quotaset_test.go new file mode 100644 index 000000000000..867d3fb55cfc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/quotaset_test.go @@ -0,0 +1,147 @@ +// +build acceptance compute quotasets + +package v2 + +import ( + "fmt" + "os" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestQuotasetGet(t *testing.T) { + clients.SkipRelease(t, "master") + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + identityClient, err := clients.NewIdentityV2Client() + th.AssertNoErr(t, err) + + tenantID, err := getTenantID(t, identityClient) + th.AssertNoErr(t, err) + + quotaSet, err := quotasets.Get(client, tenantID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, quotaSet) + + th.AssertEquals(t, quotaSet.FixedIPs, -1) +} + +func getTenantID(t *testing.T, client *gophercloud.ServiceClient) (string, error) { + allPages, err := tenants.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allTenants, err := tenants.ExtractTenants(allPages) + th.AssertNoErr(t, err) + + for _, tenant := range allTenants { + return tenant.ID, nil + } + + return "", fmt.Errorf("Unable to get tenant ID") +} + +func getTenantIDByName(t *testing.T, client *gophercloud.ServiceClient, name string) (string, error) { + allPages, err := tenants.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allTenants, err := tenants.ExtractTenants(allPages) + th.AssertNoErr(t, err) + + for _, tenant := range allTenants { + if tenant.Name == name { + return tenant.ID, nil + } + } + + return "", fmt.Errorf("Unable to get tenant ID") +} + +// What will be sent as desired Quotas to the Server +var UpdateQuotaOpts = quotasets.UpdateOpts{ + FixedIPs: gophercloud.IntToPointer(10), + FloatingIPs: gophercloud.IntToPointer(10), + InjectedFileContentBytes: gophercloud.IntToPointer(10240), + InjectedFilePathBytes: gophercloud.IntToPointer(255), + InjectedFiles: gophercloud.IntToPointer(5), + KeyPairs: gophercloud.IntToPointer(10), + MetadataItems: gophercloud.IntToPointer(128), + RAM: gophercloud.IntToPointer(20000), + SecurityGroupRules: gophercloud.IntToPointer(20), + SecurityGroups: gophercloud.IntToPointer(10), + Cores: gophercloud.IntToPointer(10), + Instances: gophercloud.IntToPointer(4), + ServerGroups: gophercloud.IntToPointer(2), + 
ServerGroupMembers: gophercloud.IntToPointer(3), +} + +// What the Server hopefully returns as the new Quotas +var UpdatedQuotas = quotasets.QuotaSet{ + FixedIPs: 10, + FloatingIPs: 10, + InjectedFileContentBytes: 10240, + InjectedFilePathBytes: 255, + InjectedFiles: 5, + KeyPairs: 10, + MetadataItems: 128, + RAM: 20000, + SecurityGroupRules: 20, + SecurityGroups: 10, + Cores: 10, + Instances: 4, + ServerGroups: 2, + ServerGroupMembers: 3, +} + +func TestQuotasetUpdateDelete(t *testing.T) { + clients.SkipRelease(t, "master") + + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + idclient, err := clients.NewIdentityV2Client() + th.AssertNoErr(t, err) + + tenantid, err := getTenantIDByName(t, idclient, os.Getenv("OS_TENANT_NAME")) + th.AssertNoErr(t, err) + + // save original quotas + orig, err := quotasets.Get(client, tenantid).Extract() + th.AssertNoErr(t, err) + + // Test Update + res, err := quotasets.Update(client, tenantid, UpdateQuotaOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, UpdatedQuotas, *res) + + // Test Delete + _, err = quotasets.Delete(client, tenantid).Extract() + th.AssertNoErr(t, err) + + // We don't know the default quotas, so just check if the quotas are not the same as before + newres, err := quotasets.Get(client, tenantid).Extract() + th.AssertNoErr(t, err) + if newres.RAM == res.RAM { + t.Fatalf("Failed to update quotas") + } + + restore := quotasets.UpdateOpts{} + FillUpdateOptsFromQuotaSet(*orig, &restore) + + // restore original quotas + res, err = quotasets.Update(client, tenantid, restore).Extract() + th.AssertNoErr(t, err) + + orig.ID = "" + th.AssertDeepEquals(t, orig, res) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/rescueunrescue_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/rescueunrescue_test.go new file mode 100644 index 000000000000..bbc38fafa8dd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/rescueunrescue_test.go @@ -0,0 +1,25 @@ +// +build acceptance compute rescueunrescue + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestServerRescueUnrescue(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + err = RescueServer(t, client, server) + th.AssertNoErr(t, err) + + err = UnrescueServer(t, client, server) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go new file mode 100644 index 000000000000..6f3028432a28 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/secgroup_test.go @@ -0,0 +1,140 @@ +// +build acceptance compute secgroups + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestSecGroupsList(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err)
+ + allPages, err := secgroups.List(client).AllPages() + th.AssertNoErr(t, err) + + allSecGroups, err := secgroups.ExtractSecurityGroups(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, secgroup := range allSecGroups { + tools.PrintResource(t, secgroup) + + if secgroup.Name == "default" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestSecGroupsCRUD(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + securityGroup, err := CreateSecurityGroup(t, client) + th.AssertNoErr(t, err) + defer DeleteSecurityGroup(t, client, securityGroup.ID) + + tools.PrintResource(t, securityGroup) + + newName := tools.RandomString("secgroup_", 4) + updateOpts := secgroups.UpdateOpts{ + Name: newName, + Description: tools.RandomString("dec_", 10), + } + updatedSecurityGroup, err := secgroups.Update(client, securityGroup.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, updatedSecurityGroup) + + t.Logf("Updated %s's name to %s", updatedSecurityGroup.ID, updatedSecurityGroup.Name) + + th.AssertEquals(t, updatedSecurityGroup.Name, newName) +} + +func TestSecGroupsRuleCreate(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + securityGroup, err := CreateSecurityGroup(t, client) + th.AssertNoErr(t, err) + defer DeleteSecurityGroup(t, client, securityGroup.ID) + + tools.PrintResource(t, securityGroup) + + rule, err := CreateSecurityGroupRule(t, client, securityGroup.ID) + th.AssertNoErr(t, err) + defer DeleteSecurityGroupRule(t, client, rule.ID) + + tools.PrintResource(t, rule) + + newSecurityGroup, err := secgroups.Get(client, securityGroup.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newSecurityGroup) + + th.AssertEquals(t, len(newSecurityGroup.Rules), 1) +} + +func TestSecGroupsAddGroupToServer(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + securityGroup, err := CreateSecurityGroup(t, client) + th.AssertNoErr(t, err) + defer DeleteSecurityGroup(t, client, securityGroup.ID) + + rule, err := CreateSecurityGroupRule(t, client, securityGroup.ID) + th.AssertNoErr(t, err) + defer DeleteSecurityGroupRule(t, client, rule.ID) + + t.Logf("Adding group %s to server %s", securityGroup.ID, server.ID) + err = secgroups.AddServer(client, server.ID, securityGroup.Name).ExtractErr() + th.AssertNoErr(t, err) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + + var found bool + for _, sg := range server.SecurityGroups { + if sg["name"] == securityGroup.Name { + found = true + } + } + + th.AssertEquals(t, found, true) + + t.Logf("Removing group %s from server %s", securityGroup.ID, server.ID) + err = secgroups.RemoveServer(client, server.ID, securityGroup.Name).ExtractErr() + th.AssertNoErr(t, err) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + found = false + + tools.PrintResource(t, server) + + for _, sg := range server.SecurityGroups { + if sg["name"] == securityGroup.Name { + found = true + } + } + + th.AssertEquals(t, found, false) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go new file mode 100644 index 
000000000000..8b7af0f3c11e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/servergroup_test.go @@ -0,0 +1,71 @@ +// +build acceptance compute servergroups + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestServergroupsCreateDelete(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + serverGroup, err := CreateServerGroup(t, client, "anti-affinity") + th.AssertNoErr(t, err) + defer DeleteServerGroup(t, client, serverGroup) + + serverGroup, err = servergroups.Get(client, serverGroup.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, serverGroup) + + allPages, err := servergroups.List(client).AllPages() + th.AssertNoErr(t, err) + + allServerGroups, err := servergroups.ExtractServerGroups(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, sg := range allServerGroups { + tools.PrintResource(t, serverGroup) + + if sg.ID == serverGroup.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestServergroupsAffinityPolicy(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + serverGroup, err := CreateServerGroup(t, client, "affinity") + th.AssertNoErr(t, err) + defer DeleteServerGroup(t, client, serverGroup) + + firstServer, err := CreateServerInServerGroup(t, client, serverGroup) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, firstServer) + + firstServer, err = servers.Get(client, firstServer.ID).Extract() + th.AssertNoErr(t, err) + + secondServer, err := CreateServerInServerGroup(t, client, serverGroup) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, secondServer) + + secondServer, err = servers.Get(client, secondServer.ID).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, firstServer.HostID, secondServer.HostID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/servers_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/servers_test.go new file mode 100644 index 000000000000..bd7c8af767cc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/servers_test.go @@ -0,0 +1,489 @@ +// +build acceptance compute servers + +package v2 + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func 
TestServersCreateDestroy(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + allPages, err := servers.List(client, servers.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + allServers, err := servers.ExtractServers(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, s := range allServers { + tools.PrintResource(t, server) + + if s.ID == server.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + allAddressPages, err := servers.ListAddresses(client, server.ID).AllPages() + th.AssertNoErr(t, err) + + allAddresses, err := servers.ExtractAddresses(allAddressPages) + th.AssertNoErr(t, err) + + for network, address := range allAddresses { + t.Logf("Addresses on %s: %+v", network, address) + } + + allInterfacePages, err := attachinterfaces.List(client, server.ID).AllPages() + th.AssertNoErr(t, err) + + allInterfaces, err := attachinterfaces.ExtractInterfaces(allInterfacePages) + th.AssertNoErr(t, err) + + for _, iface := range allInterfaces { + t.Logf("Interfaces: %+v", iface) + } + + allNetworkAddressPages, err := servers.ListAddressesByNetwork(client, server.ID, choices.NetworkName).AllPages() + th.AssertNoErr(t, err) + + allNetworkAddresses, err := servers.ExtractNetworkAddresses(allNetworkAddressPages) + th.AssertNoErr(t, err) + + t.Logf("Addresses on %s:", choices.NetworkName) + for _, address := range allNetworkAddresses { + t.Logf("%+v", address) + } +} + +func TestServersWithExtensionsCreateDestroy(t *testing.T) { + clients.RequireLong(t) + + var extendedServer struct { + servers.Server + availabilityzones.ServerAvailabilityZoneExt + extendedstatus.ServerExtendedStatusExt + serverusage.UsageExt + } + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + err = servers.Get(client, server.ID).ExtractInto(&extendedServer) + th.AssertNoErr(t, err) + tools.PrintResource(t, extendedServer) + + th.AssertEquals(t, extendedServer.AvailabilityZone, "nova") + th.AssertEquals(t, int(extendedServer.PowerState), extendedstatus.RUNNING) + th.AssertEquals(t, extendedServer.TaskState, "") + th.AssertEquals(t, extendedServer.VmState, "active") + th.AssertEquals(t, extendedServer.LaunchedAt.IsZero(), false) + th.AssertEquals(t, extendedServer.TerminatedAt.IsZero(), true) +} + +func TestServersWithoutImageRef(t *testing.T) { + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServerWithoutImageRef(t, client) + if err != nil { + if err400, ok := err.(*gophercloud.ErrUnexpectedResponseCode); ok { + if !strings.Contains("Missing imageRef attribute", string(err400.Body)) { + defer DeleteServer(t, client, server) + } + } + } +} + +func TestServersUpdate(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + alternateName := tools.RandomString("ACPTTEST", 16) + for alternateName == server.Name { + alternateName = tools.RandomString("ACPTTEST", 16) + } + + t.Logf("Attempting to rename the server to %s.", alternateName) + + updateOpts := servers.UpdateOpts{ + Name: alternateName, + } + + updated, 
err := servers.Update(client, server.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, updated.ID, server.ID) + + err = tools.WaitFor(func() (bool, error) { + latest, err := servers.Get(client, updated.ID).Extract() + if err != nil { + return false, err + } + + return latest.Name == alternateName, nil + }) +} + +func TestServersMetadata(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + tools.PrintResource(t, server) + + metadata, err := servers.UpdateMetadata(client, server.ID, servers.MetadataOpts{ + "foo": "bar", + "this": "that", + }).Extract() + th.AssertNoErr(t, err) + t.Logf("UpdateMetadata result: %+v\n", metadata) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + + expectedMetadata := map[string]string{ + "abc": "def", + "foo": "bar", + "this": "that", + } + th.AssertDeepEquals(t, expectedMetadata, server.Metadata) + + err = servers.DeleteMetadatum(client, server.ID, "foo").ExtractErr() + th.AssertNoErr(t, err) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + + expectedMetadata = map[string]string{ + "abc": "def", + "this": "that", + } + th.AssertDeepEquals(t, expectedMetadata, server.Metadata) + + metadata, err = servers.CreateMetadatum(client, server.ID, servers.MetadatumOpts{ + "foo": "baz", + }).Extract() + th.AssertNoErr(t, err) + t.Logf("CreateMetadatum result: %+v\n", metadata) + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, server) + + expectedMetadata = map[string]string{ + "abc": "def", + "this": "that", + "foo": "baz", + } + th.AssertDeepEquals(t, expectedMetadata, server.Metadata) + + metadata, err = servers.Metadatum(client, server.ID, "foo").Extract() + th.AssertNoErr(t, err) + t.Logf("Metadatum result: %+v\n", metadata) + th.AssertEquals(t, "baz", metadata["foo"]) + + metadata, err = servers.Metadata(client, server.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Metadata result: %+v\n", metadata) + + th.AssertDeepEquals(t, expectedMetadata, metadata) + + metadata, err = servers.ResetMetadata(client, server.ID, servers.MetadataOpts{}).Extract() + th.AssertNoErr(t, err) + t.Logf("ResetMetadata result: %+v\n", metadata) + th.AssertDeepEquals(t, map[string]string{}, metadata) +} + +func TestServersActionChangeAdminPassword(t *testing.T) { + clients.RequireLong(t) + clients.RequireGuestAgent(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + randomPassword := tools.MakeNewPassword(server.AdminPass) + res := servers.ChangeAdminPassword(client, server.ID, randomPassword) + th.AssertNoErr(t, res.Err) + + if err = WaitForComputeStatus(client, server, "PASSWORD"); err != nil { + t.Fatal(err) + } + + if err = WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestServersActionReboot(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + rebootOpts := servers.RebootOpts{ + Type: servers.SoftReboot, + } + + t.Logf("Attempting reboot of 
server %s", server.ID) + res := servers.Reboot(client, server.ID, rebootOpts) + th.AssertNoErr(t, res.Err) + + if err = WaitForComputeStatus(client, server, "REBOOT"); err != nil { + t.Fatal(err) + } + + if err = WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestServersActionRebuild(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to rebuild server %s", server.ID) + + rebuildOpts := servers.RebuildOpts{ + Name: tools.RandomString("ACPTTEST", 16), + AdminPass: tools.MakeNewPassword(server.AdminPass), + ImageID: choices.ImageID, + } + + rebuilt, err := servers.Rebuild(client, server.ID, rebuildOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, rebuilt.ID, server.ID) + + if err = WaitForComputeStatus(client, rebuilt, "REBUILD"); err != nil { + t.Fatal(err) + } + + if err = WaitForComputeStatus(client, rebuilt, "ACTIVE"); err != nil { + t.Fatal(err) + } +} + +func TestServersActionResizeConfirm(t *testing.T) { + clients.RequireLong(t) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to resize server %s", server.ID) + ResizeServer(t, client, server) + + t.Logf("Attempting to confirm resize for server %s", server.ID) + if res := servers.ConfirmResize(client, server.ID); res.Err != nil { + t.Fatal(res.Err) + } + + if err = WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, server.Flavor["id"], choices.FlavorIDResize) +} + +func TestServersActionResizeRevert(t *testing.T) { + clients.RequireLong(t) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to resize server %s", server.ID) + ResizeServer(t, client, server) + + t.Logf("Attempting to revert resize for server %s", server.ID) + if res := servers.RevertResize(client, server.ID); res.Err != nil { + t.Fatal(res.Err) + } + + if err = WaitForComputeStatus(client, server, "ACTIVE"); err != nil { + t.Fatal(err) + } + + server, err = servers.Get(client, server.ID).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, server.Flavor["id"], choices.FlavorID) +} + +func TestServersActionPause(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to pause server %s", server.ID) + err = pauseunpause.Pause(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) + + err = WaitForComputeStatus(client, server, "PAUSED") + th.AssertNoErr(t, err) + + err = pauseunpause.Unpause(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) + + err = WaitForComputeStatus(client, server, "ACTIVE") + th.AssertNoErr(t, err) +} + +func 
TestServersActionSuspend(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to suspend server %s", server.ID) + err = suspendresume.Suspend(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) + + err = WaitForComputeStatus(client, server, "SUSPENDED") + th.AssertNoErr(t, err) + + err = suspendresume.Resume(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) + + err = WaitForComputeStatus(client, server, "ACTIVE") + th.AssertNoErr(t, err) +} + +func TestServersActionLock(t *testing.T) { + clients.RequireLong(t) + clients.RequireNonAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + t.Logf("Attempting to Lock server %s", server.ID) + err = lockunlock.Lock(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Attempting to delete locked server %s", server.ID) + err = servers.Delete(client, server.ID).ExtractErr() + th.AssertEquals(t, err != nil, true) + + t.Logf("Attempting to unlock server %s", server.ID) + err = lockunlock.Unlock(client, server.ID).ExtractErr() + th.AssertNoErr(t, err) + + err = WaitForComputeStatus(client, server, "ACTIVE") + th.AssertNoErr(t, err) +} + +func TestServersConsoleOutput(t *testing.T) { + client, err := clients.NewComputeV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + server, err := CreateServer(t, client) + if err != nil { + t.Fatalf("Unable to create server: %v", err) + } + + defer DeleteServer(t, client, server) + + outputOpts := &servers.ShowConsoleOutputOpts{ + Length: 4, + } + output, err := servers.ShowConsoleOutput(client, server.ID, outputOpts).Extract() + if err != nil { + t.Fatal(err) + } + + tools.PrintResource(t, output) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/services_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/services_test.go new file mode 100644 index 000000000000..5c6484e0e65c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/services_test.go @@ -0,0 +1,36 @@ +// +build acceptance compute services + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestServicesList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := services.List(client).AllPages() + th.AssertNoErr(t, err) + + allServices, err := services.ExtractServices(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, service := range allServices { + tools.PrintResource(t, service) + + if service.Binary == "nova-scheduler" { + found = true + } + } + + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go new file mode 100644 index 000000000000..a53c64d3532f --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/tenantnetworks_test.go @@ -0,0 +1,53 @@ +// +build acceptance compute servers + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTenantNetworksList(t *testing.T) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + allPages, err := tenantnetworks.List(client).AllPages() + th.AssertNoErr(t, err) + + allTenantNetworks, err := tenantnetworks.ExtractNetworks(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, network := range allTenantNetworks { + tools.PrintResource(t, network) + + if network.Name == choices.NetworkName { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestTenantNetworksGet(t *testing.T) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + th.AssertNoErr(t, err) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + networkID, err := GetNetworkIDFromTenantNetworks(t, client, choices.NetworkName) + th.AssertNoErr(t, err) + + network, err := tenantnetworks.Get(client, networkID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, network) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/usage_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/usage_test.go new file mode 100644 index 000000000000..c998b59e2f46 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/usage_test.go @@ -0,0 +1,91 @@ +// +build acceptance compute usage + +package v2 + +import ( + "strings" + "testing" + "time" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestUsageSingleTenant(t *testing.T) { + t.Skip("This is not passing in OpenLab. Works locally") + + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + DeleteServer(t, client, server) + + endpointParts := strings.Split(client.Endpoint, "/") + tenantID := endpointParts[4] + + end := time.Now() + start := end.AddDate(0, -1, 0) + opts := usage.SingleTenantOpts{ + Start: &start, + End: &end, + } + + err = usage.SingleTenant(client, tenantID, opts).EachPage(func(page pagination.Page) (bool, error) { + tenantUsage, err := usage.ExtractSingleTenant(page) + th.AssertNoErr(t, err) + + tools.PrintResource(t, tenantUsage) + if tenantUsage.TotalHours == 0 { + t.Fatalf("TotalHours should not be 0") + } + + return true, nil + }) + + th.AssertNoErr(t, err) +} + +func TestUsageAllTenants(t *testing.T) { + t.Skip("This is not passing in OpenLab. 
Works locally") + + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + DeleteServer(t, client, server) + + end := time.Now() + start := end.AddDate(0, -1, 0) + opts := usage.AllTenantsOpts{ + Detailed: true, + Start: &start, + End: &end, + } + + err = usage.AllTenants(client, opts).EachPage(func(page pagination.Page) (bool, error) { + allUsage, err := usage.ExtractAllTenants(page) + th.AssertNoErr(t, err) + + tools.PrintResource(t, allUsage) + + if len(allUsage) == 0 { + t.Fatalf("No usage returned") + } + + if allUsage[0].TotalHours == 0 { + t.Fatalf("TotalHours should not be 0") + } + return true, nil + }) + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go new file mode 100644 index 000000000000..022df830ca0a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/compute/v2/volumeattach_test.go @@ -0,0 +1,38 @@ +// +build acceptance compute volumeattach + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + bs "github.com/gophercloud/gophercloud/acceptance/openstack/blockstorage/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestVolumeAttachAttachment(t *testing.T) { + clients.RequireLong(t) + + client, err := clients.NewComputeV2Client() + th.AssertNoErr(t, err) + + blockClient, err := clients.NewBlockStorageV2Client() + th.AssertNoErr(t, err) + + server, err := CreateServer(t, client) + th.AssertNoErr(t, err) + defer DeleteServer(t, client, server) + + volume, err := bs.CreateVolume(t, blockClient) + th.AssertNoErr(t, err) + defer bs.DeleteVolume(t, blockClient, volume) + + volumeAttachment, err := CreateVolumeAttachment(t, client, blockClient, server, volume) + th.AssertNoErr(t, err) + defer DeleteVolumeAttachment(t, client, blockClient, server, volumeAttachment) + + tools.PrintResource(t, volumeAttachment) + + th.AssertEquals(t, volumeAttachment.ServerID, server.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/container/v1/capsules.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/container/v1/capsules.go new file mode 100644 index 000000000000..6bf682cd11d8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/container/v1/capsules.go @@ -0,0 +1,31 @@ +package v1 + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/container/v1/capsules" +) + +// WaitForCapsuleStatus will poll a capsule's status until it either matches +// the specified status or the status becomes Failed. +func WaitForCapsuleStatus(client *gophercloud.ServiceClient, capsule *capsules.Capsule, status string) error { + return tools.WaitFor(func() (bool, error) { + latest, err := capsules.Get(client, capsule.UUID).Extract() + if err != nil { + return false, err + } + + if latest.Status == status { + // Success! 
+ return true, nil + } + + if latest.Status == "Failed" { + return false, fmt.Errorf("Capsule in FAILED state") + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/container/v1/capsules_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/container/v1/capsules_test.go new file mode 100644 index 000000000000..48f001df3b35 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/container/v1/capsules_test.go @@ -0,0 +1,88 @@ +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/container/v1/capsules" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCapsule(t *testing.T) { + client, err := clients.NewContainerV1Client() + if err != nil { + t.Fatalf("Unable to create an container v1 client: %v", err) + } + th.AssertNoErr(t, err) + template := new(capsules.Template) + template.Bin = []byte(`{ + "capsuleVersion": "beta", + "kind": "capsule", + "metadata": { + "labels": { + "app": "web", + "app1": "web1" + }, + "name": "template" + }, + "spec": { + "restartPolicy": "Always", + "containers": [ + { + "command": [ + "sleep", + "1000000" + ], + "env": { + "ENV1": "/usr/local/bin", + "ENV2": "/usr/bin" + }, + "image": "ubuntu", + "imagePullPolicy": "ifnotpresent", + "ports": [ + { + "containerPort": 80, + "hostPort": 80, + "name": "nginx-port", + "protocol": "TCP" + } + ], + "resources": { + "requests": { + "cpu": 1, + "memory": 1024 + } + }, + "workDir": "/root" + } + ] + } + }`) + createOpts := capsules.CreateOpts{ + TemplateOpts: template, + } + capsule, err := capsules.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + err = WaitForCapsuleStatus(client, capsule, "Running") + th.AssertNoErr(t, err) + pager := capsules.List(client, nil) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + CapsuleList, err := capsules.ExtractCapsules(page) + th.AssertNoErr(t, err) + + for _, m := range CapsuleList { + capsuleUUID := m.UUID + capsule, err := capsules.Get(client, capsuleUUID).Extract() + + th.AssertNoErr(t, err) + th.AssertEquals(t, capsule.MetaName, "template") + + err = capsules.Delete(client, capsuleUUID).ExtractErr() + th.AssertNoErr(t, err) + + } + return true, nil + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/certificates_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/certificates_test.go new file mode 100644 index 000000000000..c3860f9a7107 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/certificates_test.go @@ -0,0 +1,45 @@ +// +build acceptance containerinfra + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCertificatesCRUD(t *testing.T) { + client, err := clients.NewContainerInfraV1Client() + th.AssertNoErr(t, err) + + clusterUUID := "8934d2d1-6bce-4ffa-a017-fb437777269d" + + opts := certificates.CreateOpts{ + BayUUID: clusterUUID, + CSR: "-----BEGIN CERTIFICATE REQUEST-----\n" + + "MIIByjCCATMCAQAwgYkxCzAJBgNVBAYTAlVTMRMwEQYDVQQIEwpDYWxpZm9ybmlh" + + "MRYwFAYDVQQHEw1Nb3VudGFpbiBWaWV3MRMwEQYDVQQKEwpHb29nbGUgSW5jMR8w" + + 
"HQYDVQQLExZJbmZvcm1hdGlvbiBUZWNobm9sb2d5MRcwFQYDVQQDEw53d3cuZ29v" + + "Z2xlLmNvbTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEApZtYJCHJ4VpVXHfV" + + "IlstQTlO4qC03hjX+ZkPyvdYd1Q4+qbAeTwXmCUKYHThVRd5aXSqlPzyIBwieMZr" + + "WFlRQddZ1IzXAlVRDWwAo60KecqeAXnnUK+5fXoTI/UgWshre8tJ+x/TMHaQKR/J" + + "cIWPhqaQhsJuzZbvAdGA80BLxdMCAwEAAaAAMA0GCSqGSIb3DQEBBQUAA4GBAIhl" + + "4PvFq+e7ipARgI5ZM+GZx6mpCz44DTo0JkwfRDf+BtrsaC0q68eTf2XhYOsq4fkH" + + "Q0uA0aVog3f5iJxCa3Hp5gxbJQ6zV6kJ0TEsuaaOhEko9sdpCoPOnRBm2i/XRD2D" + + "6iNh8f8z0ShGsFqjDgFHyF3o+lUyj+UC6H1QW7bn\n" + + "-----END CERTIFICATE REQUEST-----", + } + + createResponse, err := certificates.Create(client, opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, opts.CSR, createResponse.CSR) + + certificate, err := certificates.Get(client, clusterUUID).Extract() + th.AssertNoErr(t, err) + t.Log(certificate.PEM) + + err = certificates.Update(client, clusterUUID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/clusters_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/clusters_test.go new file mode 100644 index 000000000000..fe5cc4c91187 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/clusters_test.go @@ -0,0 +1,38 @@ +// +build acceptance containerinfra + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestClustersCRUD(t *testing.T) { + client, err := clients.NewContainerInfraV1Client() + th.AssertNoErr(t, err) + + clusterTemplate, err := CreateClusterTemplate(t, client) + th.AssertNoErr(t, err) + defer DeleteClusterTemplate(t, client, clusterTemplate.UUID) + + clusterID, err := CreateCluster(t, client, clusterTemplate.UUID) + tools.PrintResource(t, clusterID) + + allPages, err := clusters.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allClusters, err := clusters.ExtractClusters(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allClusters { + if v.UUID == clusterID { + found = true + } + } + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/clustertemplates_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/clustertemplates_test.go new file mode 100644 index 000000000000..fb27d0971f03 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/clustertemplates_test.go @@ -0,0 +1,70 @@ +// +build acceptance containerinfra + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestClusterTemplatesCRUD(t *testing.T) { + client, err := clients.NewContainerInfraV1Client() + th.AssertNoErr(t, err) + + clusterTemplate, err := CreateClusterTemplate(t, client) + th.AssertNoErr(t, err) + t.Log(clusterTemplate.Name) + + defer DeleteClusterTemplate(t, client, clusterTemplate.UUID) + + // Test clusters list + allPages, err := clustertemplates.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + 
allClusterTemplates, err := clustertemplates.ExtractClusterTemplates(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allClusterTemplates { + if v.UUID == clusterTemplate.UUID { + found = true + } + } + + th.AssertEquals(t, found, true) + + template, err := clustertemplates.Get(client, clusterTemplate.UUID).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, clusterTemplate.UUID, template.UUID) + + // Test cluster update + updateOpts := []clustertemplates.UpdateOptsBuilder{ + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/master_lb_enabled", + Value: "false", + }, + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/registry_enabled", + Value: "false", + }, + clustertemplates.UpdateOpts{ + Op: clustertemplates.AddOp, + Path: "/labels/test", + Value: "test", + }, + } + + updateClusterTemplate, err := clustertemplates.Update(client, clusterTemplate.UUID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, false, updateClusterTemplate.MasterLBEnabled) + th.AssertEquals(t, false, updateClusterTemplate.RegistryEnabled) + th.AssertEquals(t, "test", updateClusterTemplate.Labels["test"]) + tools.PrintResource(t, updateClusterTemplate) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/containerinfra.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/containerinfra.go new file mode 100644 index 000000000000..678f030fad89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/containerinfra.go @@ -0,0 +1,155 @@ +package v1 + +import ( + "fmt" + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// CreateClusterTemplate will create a random cluster template. An error will be returned if the +// cluster-template could not be created.
+func CreateClusterTemplate(t *testing.T, client *gophercloud.ServiceClient) (*clustertemplates.ClusterTemplate, error) { + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + return nil, err + } + + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create cluster template: %s", name) + + boolFalse := false + createOpts := clustertemplates.CreateOpts{ + COE: "swarm", + DNSNameServer: "8.8.8.8", + DockerStorageDriver: "devicemapper", + ExternalNetworkID: choices.ExternalNetworkID, + FlavorID: choices.FlavorID, + FloatingIPEnabled: &boolFalse, + ImageID: choices.MagnumImageID, + MasterFlavorID: choices.FlavorID, + MasterLBEnabled: &boolFalse, + Name: name, + Public: &boolFalse, + RegistryEnabled: &boolFalse, + ServerType: "vm", + } + + res := clustertemplates.Create(client, createOpts) + if res.Err != nil { + return nil, res.Err + } + + requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, true, requestID != "") + + t.Logf("Cluster Template %s request ID: %s", name, requestID) + + clusterTemplate, err := res.Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created cluster template: %s", clusterTemplate.Name) + + tools.PrintResource(t, clusterTemplate) + tools.PrintResource(t, clusterTemplate.CreatedAt) + + th.AssertEquals(t, name, clusterTemplate.Name) + th.AssertEquals(t, choices.ExternalNetworkID, clusterTemplate.ExternalNetworkID) + th.AssertEquals(t, choices.MagnumImageID, clusterTemplate.ImageID) + + return clusterTemplate, nil +} + +// DeleteClusterTemplate will delete a given cluster-template. A fatal error will occur if the +// cluster-template could not be deleted. This works best as a deferred function. +func DeleteClusterTemplate(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete cluster-template: %s", id) + + err := clustertemplates.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Error deleting cluster-template %s: %s:", id, err) + } + + t.Logf("Successfully deleted cluster-template: %s", id) + + return +} + +// CreateCluster will create a random cluster. An error will be returned if the +// cluster could not be created. 
+func CreateCluster(t *testing.T, client *gophercloud.ServiceClient, clusterTemplateID string) (string, error) { + clusterName := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create cluster: %s using template %s", clusterName, clusterTemplateID) + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + return "", err + } + + masterCount := 1 + nodeCount := 1 + createTimeout := 100 + createOpts := clusters.CreateOpts{ + ClusterTemplateID: clusterTemplateID, + CreateTimeout: &createTimeout, + FlavorID: choices.FlavorID, + Keypair: choices.MagnumKeypair, + Labels: map[string]string{}, + MasterCount: &masterCount, + MasterFlavorID: choices.FlavorID, + Name: clusterName, + NodeCount: &nodeCount, + } + + createResult := clusters.Create(client, createOpts) + th.AssertNoErr(t, createResult.Err) + if len(createResult.Header["X-Openstack-Request-Id"]) > 0 { + t.Logf("Cluster Create Request ID: %s", createResult.Header["X-Openstack-Request-Id"][0]) + } + + clusterID, err := createResult.Extract() + if err != nil { + return "", err + } + + t.Logf("Cluster created: %+v", clusterID) + + err = WaitForCluster(client, clusterID, "CREATE_COMPLETE") + if err != nil { + return clusterID, err + } + + t.Logf("Successfully created cluster: %s id: %s", clusterName, clusterID) + return clusterID, nil +} + +func WaitForCluster(client *gophercloud.ServiceClient, clusterID string, status string) error { + return tools.WaitFor(func() (bool, error) { + cluster, err := clusters.Get(client, clusterID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok && status == "DELETE_COMPLETE" { + return true, nil + } + + return false, err + } + + if cluster.Status == status { + return true, nil + } + + if strings.Contains(cluster.Status, "FAILED") { + return false, fmt.Errorf("Cluster %s FAILED. 
Status=%s StatusReason=%s", clusterID, cluster.Status, cluster.StatusReason) + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/pkg.go new file mode 100644 index 000000000000..b7b1f993d554 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/containerinfra/v1/pkg.go @@ -0,0 +1 @@ +package v1 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/configurations_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/configurations_test.go new file mode 100644 index 000000000000..87e883f45831 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/configurations_test.go @@ -0,0 +1,50 @@ +// +build acceptance db + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/db/v1/configurations" +) + +func TestConfigurationsCRUD(t *testing.T) { + client, err := clients.NewDBV1Client() + if err != nil { + t.Fatalf("Unable to create a DB client: %v", err) + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatalf("Unable to get environment settings") + } + + createOpts := &configurations.CreateOpts{ + Name: "test", + Description: "description", + } + + datastore := configurations.DatastoreOpts{ + Type: choices.DBDatastoreType, + Version: choices.DBDatastoreVersion, + } + createOpts.Datastore = &datastore + + values := make(map[string]interface{}) + values["collation_server"] = "latin1_swedish_ci" + createOpts.Values = values + + cgroup, err := configurations.Create(client, createOpts).Extract() + if err != nil { + t.Fatalf("Unable to create configuration: %v", err) + } + + err = configurations.Delete(client, cgroup.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete configuration: %v", err) + } + + tools.PrintResource(t, cgroup) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/databases_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/databases_test.go new file mode 100644 index 000000000000..dcbf72f040ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/databases_test.go @@ -0,0 +1,55 @@ +// +build acceptance db + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/db/v1/databases" +) + +// Because it takes so long to create an instance, +// all tests will be housed in a single function. +func TestDatabases(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + + client, err := clients.NewDBV1Client() + if err != nil { + t.Fatalf("Unable to create a DB client: %v", err) + } + + // Create and Get an instance. + instance, err := CreateInstance(t, client) + if err != nil { + t.Fatalf("Unable to create instance: %v", err) + } + defer DeleteInstance(t, client, instance.ID) + + // Create a database. + err = CreateDatabase(t, client, instance.ID) + if err != nil { + t.Fatalf("Unable to create database: %v", err) + } + + // List all databases. 
+ allPages, err := databases.List(client, instance.ID).AllPages() + if err != nil { + t.Fatalf("Unable to list databases: %v", err) + } + + allDatabases, err := databases.ExtractDBs(allPages) + if err != nil { + t.Fatalf("Unable to extract databases: %v", err) + } + + for _, db := range allDatabases { + tools.PrintResource(t, db) + } + + defer DeleteDatabase(t, client, instance.ID, allDatabases[0].Name) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/db.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/db.go new file mode 100644 index 000000000000..f5e637f3d9cc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/db.go @@ -0,0 +1,145 @@ +// Package v1 contains common functions for creating db resources for use +// in acceptance tests. See the `*_test.go` files for example usages. +package v1 + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" + "github.com/gophercloud/gophercloud/openstack/db/v1/users" +) + +// CreateDatabase will create a database with a randomly generated name. +// An error will be returned if the database was unable to be created. +func CreateDatabase(t *testing.T, client *gophercloud.ServiceClient, instanceID string) error { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create database: %s", name) + + createOpts := databases.BatchCreateOpts{ + databases.CreateOpts{ + Name: name, + }, + } + + return databases.Create(client, instanceID, createOpts).ExtractErr() +} + +// CreateInstance will create an instance with a randomly generated name. +// The flavor of the instance will be the value of the OS_FLAVOR_ID +// environment variable. The Datastore will be pulled from the +// OS_DATASTORE_TYPE_ID environment variable. +// An error will be returned if the instance was unable to be created. +func CreateInstance(t *testing.T, client *gophercloud.ServiceClient) (*instances.Instance, error) { + if testing.Short() { + t.Skip("Skipping test that requires instance creation in short mode.") + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + return nil, err + } + + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create instance: %s", name) + + createOpts := instances.CreateOpts{ + FlavorRef: choices.FlavorID, + Size: 1, + Name: name, + Datastore: &instances.DatastoreOpts{ + Type: choices.DBDatastoreType, + Version: choices.DBDatastoreVersion, + }, + } + + instance, err := instances.Create(client, createOpts).Extract() + if err != nil { + return instance, err + } + + if err := WaitForInstanceStatus(client, instance, "ACTIVE"); err != nil { + return instance, err + } + + return instances.Get(client, instance.ID).Extract() +} + +// CreateUser will create a user with a randomly generated name. +// An error will be returned if the user was unable to be created.
+func CreateUser(t *testing.T, client *gophercloud.ServiceClient, instanceID string) error { + name := tools.RandomString("ACPTTEST", 8) + password := tools.RandomString("", 8) + t.Logf("Attempting to create user: %s", name) + + createOpts := users.BatchCreateOpts{ + users.CreateOpts{ + Name: name, + Password: password, + }, + } + + return users.Create(client, instanceID, createOpts).ExtractErr() +} + +// DeleteDatabase deletes a database. A fatal error will occur if the database +// failed to delete. This works best when used as a deferred function. +func DeleteDatabase(t *testing.T, client *gophercloud.ServiceClient, instanceID, name string) { + t.Logf("Attempting to delete database: %s", name) + err := databases.Delete(client, instanceID, name).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete database %s: %s", name, err) + } + + t.Logf("Deleted database: %s", name) +} + +// DeleteInstance deletes an instance. A fatal error will occur if the instance +// failed to delete. This works best when used as a deferred function. +func DeleteInstance(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete instance: %s", id) + err := instances.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete instance %s: %s", id, err) + } + + t.Logf("Deleted instance: %s", id) +} + +// DeleteUser deletes a user. A fatal error will occur if the user +// failed to delete. This works best when used as a deferred function. +func DeleteUser(t *testing.T, client *gophercloud.ServiceClient, instanceID, name string) { + t.Logf("Attempting to delete user: %s", name) + err := users.Delete(client, instanceID, name).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete users %s: %s", name, err) + } + + t.Logf("Deleted users: %s", name) +} + +// WaitForInstanceStatus will poll an instance's status until it either matches +// the specified status or the status becomes ERROR.
+func WaitForInstanceStatus( + client *gophercloud.ServiceClient, instance *instances.Instance, status string) error { + return tools.WaitFor(func() (bool, error) { + latest, err := instances.Get(client, instance.ID).Extract() + if err != nil { + return false, err + } + + if latest.Status == status { + return true, nil + } + + if latest.Status == "ERROR" { + return false, fmt.Errorf("Instance in ERROR state") + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/flavors_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/flavors_test.go new file mode 100644 index 000000000000..73171b94246e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/flavors_test.go @@ -0,0 +1,58 @@ +// +build acceptance db + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/db/v1/flavors" +) + +func TestFlavorsList(t *testing.T) { + client, err := clients.NewDBV1Client() + if err != nil { + t.Fatalf("Unable to create a DB client: %v", err) + } + + allPages, err := flavors.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve flavors: %v", err) + } + + allFlavors, err := flavors.ExtractFlavors(allPages) + if err != nil { + t.Fatalf("Unable to extract flavors: %v", err) + } + + for _, flavor := range allFlavors { + tools.PrintResource(t, &flavor) + } +} + +func TestFlavorsGet(t *testing.T) { + client, err := clients.NewDBV1Client() + if err != nil { + t.Fatalf("Unable to create a DB client: %v", err) + } + + allPages, err := flavors.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve flavors: %v", err) + } + + allFlavors, err := flavors.ExtractFlavors(allPages) + if err != nil { + t.Fatalf("Unable to extract flavors: %v", err) + } + + if len(allFlavors) > 0 { + flavor, err := flavors.Get(client, allFlavors[0].StrID).Extract() + if err != nil { + t.Fatalf("Unable to get flavor: %v", err) + } + + tools.PrintResource(t, flavor) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/instances_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/instances_test.go new file mode 100644 index 000000000000..a98047f3e03e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/instances_test.go @@ -0,0 +1,71 @@ +// +build acceptance db + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" +) + +// Because it takes so long to create an instance, +// all tests will be housed in a single function. +func TestInstances(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + + client, err := clients.NewDBV1Client() + if err != nil { + t.Fatalf("Unable to create a DB client: %v", err) + } + + // Create and Get an instance. + instance, err := CreateInstance(t, client) + if err != nil { + t.Fatalf("Unable to create instance: %v", err) + } + defer DeleteInstance(t, client, instance.ID) + tools.PrintResource(t, &instance) + + // List all instances. 
+ allPages, err := instances.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to list instances: %v", err) + } + + allInstances, err := instances.ExtractInstances(allPages) + if err != nil { + t.Fatalf("Unable to extract instances: %v", err) + } + + for _, instance := range allInstances { + tools.PrintResource(t, instance) + } + + // Enable root user. + _, err = instances.EnableRootUser(client, instance.ID).Extract() + if err != nil { + t.Fatalf("Unable to enable root user: %v", err) + } + + enabled, err := instances.IsRootEnabled(client, instance.ID).Extract() + if err != nil { + t.Fatalf("Unable to check if root user is enabled: %v", err) + } + + t.Logf("Root user is enabled: %t", enabled) + + // Restart + err = instances.Restart(client, instance.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to restart instance: %v", err) + } + + err = WaitForInstanceStatus(client, instance, "ACTIVE") + if err != nil { + t.Fatalf("Unable to restart instance: %v", err) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/pkg.go new file mode 100644 index 000000000000..b7b1f993d554 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/pkg.go @@ -0,0 +1 @@ +package v1 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/users_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/users_test.go new file mode 100644 index 000000000000..4335eabb2fb8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/db/v1/users_test.go @@ -0,0 +1,54 @@ +// +build acceptance db + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/db/v1/users" +) + +// Because it takes so long to create an instance, +// all tests will be housed in a single function. +func TestUsers(t *testing.T) { + if testing.Short() { + t.Skip("Skipping in short mode") + } + + client, err := clients.NewDBV1Client() + if err != nil { + t.Fatalf("Unable to create a DB client: %v", err) + } + + // Create and Get an instance. + instance, err := CreateInstance(t, client) + if err != nil { + t.Fatalf("Unable to create instance: %v", err) + } + defer DeleteInstance(t, client, instance.ID) + + // Create a user. + err = CreateUser(t, client, instance.ID) + if err != nil { + t.Fatalf("Unable to create user: %v", err) + } + + // List all users. 
+ allPages, err := users.List(client, instance.ID).AllPages() + if err != nil { + t.Fatalf("Unable to list users: %v", err) + } + + allUsers, err := users.ExtractUsers(allPages) + if err != nil { + t.Fatalf("Unable to extract users: %v", err) + } + + for _, user := range allUsers { + tools.PrintResource(t, user) + } + + defer DeleteUser(t, client, instance.ID, allUsers[0].Name) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/dns.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/dns.go new file mode 100644 index 000000000000..18cd157fc1c0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/dns.go @@ -0,0 +1,175 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// CreateRecordSet will create a RecordSet with a random name. An error will +// be returned if the zone was unable to be created. +func CreateRecordSet(t *testing.T, client *gophercloud.ServiceClient, zone *zones.Zone) (*recordsets.RecordSet, error) { + t.Logf("Attempting to create recordset: %s", zone.Name) + + createOpts := recordsets.CreateOpts{ + Name: zone.Name, + Type: "A", + TTL: 3600, + Description: "Test recordset", + Records: []string{"10.1.0.2"}, + } + + rs, err := recordsets.Create(client, zone.ID, createOpts).Extract() + if err != nil { + return rs, err + } + + if err := WaitForRecordSetStatus(client, rs, "ACTIVE"); err != nil { + return rs, err + } + + newRS, err := recordsets.Get(client, rs.ZoneID, rs.ID).Extract() + if err != nil { + return newRS, err + } + + t.Logf("Created record set: %s", newRS.Name) + + th.AssertEquals(t, newRS.Name, zone.Name) + + return rs, nil +} + +// CreateZone will create a Zone with a random name. An error will +// be returned if the zone was unable to be created. +func CreateZone(t *testing.T, client *gophercloud.ServiceClient) (*zones.Zone, error) { + zoneName := tools.RandomString("ACPTTEST", 8) + ".com." + + t.Logf("Attempting to create zone: %s", zoneName) + createOpts := zones.CreateOpts{ + Name: zoneName, + Email: "root@example.com", + Type: "PRIMARY", + TTL: 7200, + Description: "Test zone", + } + + zone, err := zones.Create(client, createOpts).Extract() + if err != nil { + return zone, err + } + + if err := WaitForZoneStatus(client, zone, "ACTIVE"); err != nil { + return zone, err + } + + newZone, err := zones.Get(client, zone.ID).Extract() + if err != nil { + return zone, err + } + + t.Logf("Created Zone: %s", zoneName) + + th.AssertEquals(t, newZone.Name, zoneName) + th.AssertEquals(t, newZone.TTL, 7200) + + return newZone, nil +} + +// CreateSecondaryZone will create a Zone with a random name. An error will +// be returned if the zone was unable to be created. +// +// This is only for example purposes as it will try to do a zone transfer. +func CreateSecondaryZone(t *testing.T, client *gophercloud.ServiceClient) (*zones.Zone, error) { + zoneName := tools.RandomString("ACPTTEST", 8) + ".com." 
+ + t.Logf("Attempting to create zone: %s", zoneName) + createOpts := zones.CreateOpts{ + Name: zoneName, + Type: "SECONDARY", + Masters: []string{"10.0.0.1"}, + } + + zone, err := zones.Create(client, createOpts).Extract() + if err != nil { + return zone, err + } + + if err := WaitForZoneStatus(client, zone, "ACTIVE"); err != nil { + return zone, err + } + + newZone, err := zones.Get(client, zone.ID).Extract() + if err != nil { + return zone, err + } + + t.Logf("Created Zone: %s", zoneName) + + th.AssertEquals(t, newZone.Name, zoneName) + th.AssertEquals(t, newZone.Masters[0], "10.0.0.1") + + return newZone, nil +} + +// DeleteRecordSet will delete a specified record set. A fatal error will occur if +// the record set failed to be deleted. This works best when used as a deferred +// function. +func DeleteRecordSet(t *testing.T, client *gophercloud.ServiceClient, rs *recordsets.RecordSet) { + err := recordsets.Delete(client, rs.ZoneID, rs.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete record set %s: %v", rs.ID, err) + } + + t.Logf("Deleted record set: %s", rs.ID) +} + +// DeleteZone will delete a specified zone. A fatal error will occur if +// the zone failed to be deleted. This works best when used as a deferred +// function. +func DeleteZone(t *testing.T, client *gophercloud.ServiceClient, zone *zones.Zone) { + _, err := zones.Delete(client, zone.ID).Extract() + if err != nil { + t.Fatalf("Unable to delete zone %s: %v", zone.ID, err) + } + + t.Logf("Deleted zone: %s", zone.ID) +} + +// WaitForRecordSetStatus will poll a record set's status until it either matches +// the specified status or the status becomes ERROR. +func WaitForRecordSetStatus(client *gophercloud.ServiceClient, rs *recordsets.RecordSet, status string) error { + return gophercloud.WaitFor(600, func() (bool, error) { + current, err := recordsets.Get(client, rs.ZoneID, rs.ID).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} + +// WaitForZoneStatus will poll a zone's status until it either matches +// the specified status or the status becomes ERROR. 
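The doc comments for WaitForRecordSetStatus above and WaitForZoneStatus below both mention stopping when the status becomes ERROR, but their closures only return early on a match and otherwise poll until the 600-second timeout. If the documented behaviour is wanted, the predicate needs an explicit short-circuit like the instance helper earlier in this diff, roughly:

// Sketch of the extra check inside the polling closure (record-set case shown).
if current.Status == "ERROR" {
	return false, fmt.Errorf("record set %s is in ERROR state", rs.ID)
}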
+func WaitForZoneStatus(client *gophercloud.ServiceClient, zone *zones.Zone, status string) error { + return gophercloud.WaitFor(600, func() (bool, error) { + current, err := zones.Get(client, zone.ID).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/recordsets_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/recordsets_test.go new file mode 100644 index 000000000000..d2d862ba5563 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/recordsets_test.go @@ -0,0 +1,86 @@ +// +build acceptance dns recordsets + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestRecordSetsListByZone(t *testing.T) { + clients.RequireDNS(t) + + client, err := clients.NewDNSV2Client() + th.AssertNoErr(t, err) + + zone, err := CreateZone(t, client) + th.AssertNoErr(t, err) + defer DeleteZone(t, client, zone) + + allPages, err := recordsets.ListByZone(client, zone.ID, nil).AllPages() + th.AssertNoErr(t, err) + + allRecordSets, err := recordsets.ExtractRecordSets(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, recordset := range allRecordSets { + tools.PrintResource(t, &recordset) + + if recordset.ZoneID == zone.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts := recordsets.ListOpts{ + Limit: 1, + } + + err = recordsets.ListByZone(client, zone.ID, listOpts).EachPage( + func(page pagination.Page) (bool, error) { + rr, err := recordsets.ExtractRecordSets(page) + th.AssertNoErr(t, err) + th.AssertEquals(t, len(rr), 1) + return true, nil + }, + ) + th.AssertNoErr(t, err) +} + +func TestRecordSetsCRUD(t *testing.T) { + clients.RequireDNS(t) + + client, err := clients.NewDNSV2Client() + th.AssertNoErr(t, err) + + zone, err := CreateZone(t, client) + th.AssertNoErr(t, err) + defer DeleteZone(t, client, zone) + + tools.PrintResource(t, &zone) + + rs, err := CreateRecordSet(t, client, zone) + th.AssertNoErr(t, err) + defer DeleteRecordSet(t, client, rs) + + tools.PrintResource(t, &rs) + + updateOpts := recordsets.UpdateOpts{ + Description: "New description", + TTL: 0, + } + + newRS, err := recordsets.Update(client, rs.ZoneID, rs.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, &newRS) + + th.AssertEquals(t, newRS.Description, "New description") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/zones_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/zones_test.go new file mode 100644 index 000000000000..263e9fea0a8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/dns/v2/zones_test.go @@ -0,0 +1,54 @@ +// +build acceptance dns zones + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestZonesCRUD(t *testing.T) { + clients.RequireDNS(t) + + client, err := clients.NewDNSV2Client() + th.AssertNoErr(t, err) + + zone, err 
:= CreateZone(t, client) + th.AssertNoErr(t, err) + defer DeleteZone(t, client, zone) + + tools.PrintResource(t, &zone) + + allPages, err := zones.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allZones, err := zones.ExtractZones(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, z := range allZones { + tools.PrintResource(t, &z) + + if zone.Name == z.Name { + found = true + } + } + + th.AssertEquals(t, found, true) + + updateOpts := zones.UpdateOpts{ + Description: "New description", + TTL: 0, + } + + newZone, err := zones.Update(client, zone.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, &newZone) + + th.AssertEquals(t, newZone.Description, "New description") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/extension_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/extension_test.go new file mode 100644 index 000000000000..593d75ca4596 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/extension_test.go @@ -0,0 +1,49 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v2/extensions" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestExtensionsList(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2Client() + th.AssertNoErr(t, err) + + allPages, err := extensions.List(client).AllPages() + th.AssertNoErr(t, err) + + allExtensions, err := extensions.ExtractExtensions(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, extension := range allExtensions { + tools.PrintResource(t, extension) + if extension.Name == "OS-KSCRUD" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestExtensionsGet(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2Client() + th.AssertNoErr(t, err) + + extension, err := extensions.Get(client, "OS-KSCRUD").Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, extension) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/identity.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/identity.go new file mode 100644 index 000000000000..b8e9ee2207e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/identity.go @@ -0,0 +1,192 @@ +// Package v2 contains common functions for creating identity-based resources +// for use in acceptance tests. See the `*_test.go` files for example usages. +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" + "github.com/gophercloud/gophercloud/openstack/identity/v2/users" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// AddUserRole will grant a role to a user in a tenant. An error will be +// returned if the grant was unsuccessful. 
+func AddUserRole(t *testing.T, client *gophercloud.ServiceClient, tenant *tenants.Tenant, user *users.User, role *roles.Role) error { + t.Logf("Attempting to grant user %s role %s in tenant %s", user.ID, role.ID, tenant.ID) + + err := roles.AddUser(client, tenant.ID, user.ID, role.ID).ExtractErr() + if err != nil { + return err + } + + t.Logf("Granted user %s role %s in tenant %s", user.ID, role.ID, tenant.ID) + + return nil +} + +// CreateTenant will create a project with a random name. +// It takes an optional createOpts parameter since creating a project +// has so many options. An error will be returned if the project was +// unable to be created. +func CreateTenant(t *testing.T, client *gophercloud.ServiceClient, c *tenants.CreateOpts) (*tenants.Tenant, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create tenant: %s", name) + + var createOpts tenants.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = tenants.CreateOpts{} + } + + createOpts.Name = name + + tenant, err := tenants.Create(client, createOpts).Extract() + if err != nil { + return tenant, err + } + + t.Logf("Successfully created project %s with ID %s", name, tenant.ID) + + th.AssertEquals(t, name, tenant.Name) + + return tenant, nil +} + +// CreateUser will create a user with a random name and adds them to the given +// tenant. An error will be returned if the user was unable to be created. +func CreateUser(t *testing.T, client *gophercloud.ServiceClient, tenant *tenants.Tenant) (*users.User, error) { + userName := tools.RandomString("user_", 5) + userEmail := userName + "@foo.com" + t.Logf("Creating user: %s", userName) + + createOpts := users.CreateOpts{ + Name: userName, + Enabled: gophercloud.Disabled, + TenantID: tenant.ID, + Email: userEmail, + } + + user, err := users.Create(client, createOpts).Extract() + if err != nil { + return user, err + } + + th.AssertEquals(t, userName, user.Name) + + return user, nil +} + +// DeleteTenant will delete a tenant by ID. A fatal error will occur if +// the tenant ID failed to be deleted. This works best when using it as +// a deferred function. +func DeleteTenant(t *testing.T, client *gophercloud.ServiceClient, tenantID string) { + err := tenants.Delete(client, tenantID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete tenant %s: %v", tenantID, err) + } + + t.Logf("Deleted tenant: %s", tenantID) +} + +// DeleteUser will delete a user. A fatal error will occur if the delete was +// unsuccessful. This works best when used as a deferred function. +func DeleteUser(t *testing.T, client *gophercloud.ServiceClient, user *users.User) { + t.Logf("Attempting to delete user: %s", user.Name) + + result := users.Delete(client, user.ID) + if result.Err != nil { + t.Fatalf("Unable to delete user") + } + + t.Logf("Deleted user: %s", user.Name) +} + +// DeleteUserRole will revoke a role of a user in a tenant. A fatal error will +// occur if the revoke was unsuccessful. This works best when used as a +// deferred function. 
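CreateUser above passes gophercloud.Disabled for Enabled, while the v3 domain and project tests later in this diff build the same *bool by hand (var iTrue bool = true, then &iTrue). The two spellings are interchangeable: Disabled, and its Enabled counterpart, are predeclared *bool values exported by the root gophercloud package. A small illustration using the v3 domains options:

// Equivalent ways of passing an enabled flag; no local variable is needed
// when using the exported helpers.
var iTrue bool = true
byHand := domains.CreateOpts{Enabled: &iTrue}
withHelper := domains.CreateOpts{Enabled: gophercloud.Enabled}
_, _ = byHand, withHelper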
+func DeleteUserRole(t *testing.T, client *gophercloud.ServiceClient, tenant *tenants.Tenant, user *users.User, role *roles.Role) { + t.Logf("Attempting to remove role %s from user %s in tenant %s", role.ID, user.ID, tenant.ID) + + err := roles.DeleteUser(client, tenant.ID, user.ID, role.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to remove role") + } + + t.Logf("Removed role %s from user %s in tenant %s", role.ID, user.ID, tenant.ID) +} + +// FindRole finds all roles that the current authenticated client has access +// to and returns the first one found. An error will be returned if the lookup +// was unsuccessful. +func FindRole(t *testing.T, client *gophercloud.ServiceClient) (*roles.Role, error) { + var role *roles.Role + + allPages, err := roles.List(client).AllPages() + if err != nil { + return role, err + } + + allRoles, err := roles.ExtractRoles(allPages) + if err != nil { + return role, err + } + + for _, r := range allRoles { + role = &r + break + } + + return role, nil +} + +// FindTenant finds all tenants that the current authenticated client has access +// to and returns the first one found. An error will be returned if the lookup +// was unsuccessful. +func FindTenant(t *testing.T, client *gophercloud.ServiceClient) (*tenants.Tenant, error) { + var tenant *tenants.Tenant + + allPages, err := tenants.List(client, nil).AllPages() + if err != nil { + return tenant, err + } + + allTenants, err := tenants.ExtractTenants(allPages) + if err != nil { + return tenant, err + } + + for _, t := range allTenants { + tenant = &t + break + } + + return tenant, nil +} + +// UpdateUser will update an existing user with a new randomly generated name. +// An error will be returned if the update was unsuccessful. +func UpdateUser(t *testing.T, client *gophercloud.ServiceClient, user *users.User) (*users.User, error) { + userName := tools.RandomString("user_", 5) + userEmail := userName + "@foo.com" + + t.Logf("Attempting to update user name from %s to %s", user.Name, userName) + + updateOpts := users.UpdateOpts{ + Name: userName, + Email: userEmail, + } + + newUser, err := users.Update(client, user.ID, updateOpts).Extract() + if err != nil { + return newUser, err + } + + th.AssertEquals(t, userName, newUser.Name) + + return newUser, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/pkg.go new file mode 100644 index 000000000000..5ec3cc8e833b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/role_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/role_test.go new file mode 100644 index 000000000000..bc9d26ec3483 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/role_test.go @@ -0,0 +1,76 @@ +// +build acceptance identity roles + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles" + "github.com/gophercloud/gophercloud/openstack/identity/v2/users" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestRolesAddToUser(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := 
clients.NewIdentityV2AdminClient() + th.AssertNoErr(t, err) + + tenant, err := FindTenant(t, client) + th.AssertNoErr(t, err) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + user, err := CreateUser(t, client, tenant) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user) + + err = AddUserRole(t, client, tenant, user, role) + th.AssertNoErr(t, err) + defer DeleteUserRole(t, client, tenant, user, role) + + allPages, err := users.ListRoles(client, tenant.ID, user.ID).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := users.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + t.Logf("Roles of user %s:", user.Name) + var found bool + for _, r := range allRoles { + tools.PrintResource(t, role) + if r.Name == role.Name { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRolesList(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2AdminClient() + th.AssertNoErr(t, err) + + allPages, err := roles.List(client).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, r := range allRoles { + tools.PrintResource(t, r) + if r.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/tenant_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/tenant_test.go new file mode 100644 index 000000000000..13a1c08c9e3d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/tenant_test.go @@ -0,0 +1,65 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTenantsList(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2Client() + th.AssertNoErr(t, err) + + allPages, err := tenants.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allTenants, err := tenants.ExtractTenants(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, tenant := range allTenants { + tools.PrintResource(t, tenant) + + if tenant.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestTenantsCRUD(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2AdminClient() + th.AssertNoErr(t, err) + + tenant, err := CreateTenant(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteTenant(t, client, tenant.ID) + + tenant, err = tenants.Get(client, tenant.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, tenant) + + updateOpts := tenants.UpdateOpts{ + Description: "some tenant", + } + + newTenant, err := tenants.Update(client, tenant.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newTenant) + + th.AssertEquals(t, newTenant.Description, "some tenant") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/token_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/token_test.go new file mode 100644 index 000000000000..30ebcc2bf005 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/token_test.go 
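token_test.go, added next, drives the v2 token workflow end to end. Stripped of assertions, the call sequence it exercises is roughly:

// Rough shape of the v2 token round trip (error handling elided for brevity).
authOptions, _ := openstack.AuthOptionsFromEnv()      // credentials from the OS_* variables
result := tokens.Create(client, authOptions)          // issue a token
token, _ := result.ExtractToken()                     // token ID and expiry
catalog, _ := result.ExtractServiceCatalog()          // service catalog from the same response
user, _ := tokens.Get(client, token.ID).ExtractUser() // validate the token (the test runs with admin credentials)
_, _ = catalog, user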
@@ -0,0 +1,60 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTokenAuthenticate(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2UnauthenticatedClient() + th.AssertNoErr(t, err) + + authOptions, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + result := tokens.Create(client, authOptions) + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + + tools.PrintResource(t, token) + + catalog, err := result.ExtractServiceCatalog() + th.AssertNoErr(t, err) + + for _, entry := range catalog.Entries { + tools.PrintResource(t, entry) + } +} + +func TestTokenValidate(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2Client() + th.AssertNoErr(t, err) + + authOptions, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + result := tokens.Create(client, authOptions) + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + + tools.PrintResource(t, token) + + getResult := tokens.Get(client, token.ID) + user, err := getResult.ExtractUser() + th.AssertNoErr(t, err) + + tools.PrintResource(t, user) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/user_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/user_test.go new file mode 100644 index 000000000000..caaaaf936a42 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v2/user_test.go @@ -0,0 +1,59 @@ +// +build acceptance identity + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v2/users" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestUsersList(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2AdminClient() + th.AssertNoErr(t, err) + + allPages, err := users.List(client).AllPages() + th.AssertNoErr(t, err) + + allUsers, err := users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, user := range allUsers { + tools.PrintResource(t, user) + + if user.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestUsersCreateUpdateDelete(t *testing.T) { + clients.RequireIdentityV2(t) + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV2AdminClient() + th.AssertNoErr(t, err) + + tenant, err := FindTenant(t, client) + th.AssertNoErr(t, err) + + user, err := CreateUser(t, client, tenant) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user) + + tools.PrintResource(t, user) + + newUser, err := UpdateUser(t, client, user) + th.AssertNoErr(t, err) + + tools.PrintResource(t, newUser) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/domains_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/domains_test.go new file mode 100644 index 000000000000..7d146464c599 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/domains_test.go @@ -0,0 
+1,89 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/domains" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestDomainsList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + var iTrue bool = true + listOpts := domains.ListOpts{ + Enabled: &iTrue, + } + + allPages, err := domains.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allDomains, err := domains.ExtractDomains(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, domain := range allDomains { + tools.PrintResource(t, domain) + + if domain.Name == "Default" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestDomainsGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + p, err := domains.Get(client, "default").Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, p) + + th.AssertEquals(t, p.Name, "Default") +} + +func TestDomainsCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + var iTrue bool = true + createOpts := domains.CreateOpts{ + Description: "Testing Domain", + Enabled: &iTrue, + } + + domain, err := CreateDomain(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteDomain(t, client, domain.ID) + + tools.PrintResource(t, domain) + + th.AssertEquals(t, domain.Description, "Testing Domain") + + var iFalse bool = false + updateOpts := domains.UpdateOpts{ + Description: "Staging Test Domain", + Enabled: &iFalse, + } + + newDomain, err := domains.Update(client, domain.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newDomain) + + th.AssertEquals(t, newDomain.Description, "Staging Test Domain") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go new file mode 100644 index 000000000000..4bc606b34c6f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/endpoint_test.go @@ -0,0 +1,78 @@ +// +build acceptance + +package v3 + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints" + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestEndpointsList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + allPages, err := endpoints.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allEndpoints, err := endpoints.ExtractEndpoints(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, endpoint := range allEndpoints { + tools.PrintResource(t, endpoint) + + if strings.Contains(endpoint.URL, "/v3") { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestEndpointsNavigateCatalog(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + // Discover the service we're interested in. 
+ serviceListOpts := services.ListOpts{ + ServiceType: "compute", + } + + allPages, err := services.List(client, serviceListOpts).AllPages() + th.AssertNoErr(t, err) + + allServices, err := services.ExtractServices(allPages) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(allServices), 1) + + computeService := allServices[0] + tools.PrintResource(t, computeService) + + // Enumerate the endpoints available for this service. + endpointListOpts := endpoints.ListOpts{ + Availability: gophercloud.AvailabilityPublic, + ServiceID: computeService.ID, + } + + allPages, err = endpoints.List(client, endpointListOpts).AllPages() + th.AssertNoErr(t, err) + + allEndpoints, err := endpoints.ExtractEndpoints(allPages) + th.AssertNoErr(t, err) + + th.AssertEquals(t, len(allServices), 1) + + tools.PrintResource(t, allEndpoints[0]) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/groups_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/groups_test.go new file mode 100644 index 000000000000..18c5ae05787d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/groups_test.go @@ -0,0 +1,126 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestGroupCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := groups.CreateOpts{ + Name: "testgroup", + DomainID: "default", + Extra: map[string]interface{}{ + "email": "testgroup@example.com", + }, + } + + // Create Group in the default domain + group, err := CreateGroup(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteGroup(t, client, group.ID) + + tools.PrintResource(t, group) + tools.PrintResource(t, group.Extra) + + updateOpts := groups.UpdateOpts{ + Description: "Test Groups", + Extra: map[string]interface{}{ + "email": "thetestgroup@example.com", + }, + } + + newGroup, err := groups.Update(client, group.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newGroup) + tools.PrintResource(t, newGroup.Extra) + + listOpts := groups.ListOpts{ + DomainID: "default", + } + + // List all Groups in default domain + allPages, err := groups.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allGroups, err := groups.ExtractGroups(allPages) + th.AssertNoErr(t, err) + + for _, g := range allGroups { + tools.PrintResource(t, g) + tools.PrintResource(t, g.Extra) + } + + var found bool + for _, group := range allGroups { + tools.PrintResource(t, group) + tools.PrintResource(t, group.Extra) + + if group.Name == newGroup.Name { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = map[string]string{ + "name__contains": "TEST", + } + + allPages, err = groups.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allGroups, err = groups.ExtractGroups(allPages) + th.AssertNoErr(t, err) + + found = false + for _, group := range allGroups { + tools.PrintResource(t, group) + tools.PrintResource(t, group.Extra) + + if group.Name == newGroup.Name { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = map[string]string{ + "name__contains": "foo", + } + + allPages, err = groups.List(client, listOpts).AllPages() + 
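One detail in the catalog-navigation test above: after the endpoints are extracted, the final count assertion checks len(allServices) a second time. Judging from the surrounding code, the intended check is presumably the endpoint list, along the lines of:

// Presumed intent of the final assertion: one public endpoint for the compute service.
th.AssertEquals(t, len(allEndpoints), 1)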
th.AssertNoErr(t, err) + + allGroups, err = groups.ExtractGroups(allPages) + th.AssertNoErr(t, err) + + found = false + for _, group := range allGroups { + tools.PrintResource(t, group) + tools.PrintResource(t, group.Extra) + + if group.Name == newGroup.Name { + found = true + } + } + + th.AssertEquals(t, found, false) + + // Get the recently created group by ID + p, err := groups.Get(client, group.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, p) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/identity.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/identity.go new file mode 100644 index 000000000000..88dee1ec932d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/identity.go @@ -0,0 +1,341 @@ +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/domains" + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/regions" + "github.com/gophercloud/gophercloud/openstack/identity/v3/roles" + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + "github.com/gophercloud/gophercloud/openstack/identity/v3/users" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// CreateProject will create a project with a random name. +// It takes an optional createOpts parameter since creating a project +// has so many options. An error will be returned if the project was +// unable to be created. +func CreateProject(t *testing.T, client *gophercloud.ServiceClient, c *projects.CreateOpts) (*projects.Project, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create project: %s", name) + + var createOpts projects.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = projects.CreateOpts{} + } + + createOpts.Name = name + + project, err := projects.Create(client, createOpts).Extract() + if err != nil { + return project, err + } + + t.Logf("Successfully created project %s with ID %s", name, project.ID) + + th.AssertEquals(t, project.Name, name) + + return project, nil +} + +// CreateUser will create a user with a random name. +// It takes an optional createOpts parameter since creating a user +// has so many options. An error will be returned if the user was +// unable to be created. +func CreateUser(t *testing.T, client *gophercloud.ServiceClient, c *users.CreateOpts) (*users.User, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create user: %s", name) + + var createOpts users.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = users.CreateOpts{} + } + + createOpts.Name = name + + user, err := users.Create(client, createOpts).Extract() + if err != nil { + return user, err + } + + t.Logf("Successfully created user %s with ID %s", name, user.ID) + + th.AssertEquals(t, user.Name, name) + + return user, nil +} + +// CreateGroup will create a group with a random name. +// It takes an optional createOpts parameter since creating a group +// has so many options. An error will be returned if the group was +// unable to be created. 
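CreateProject and CreateUser above, like the Create* helpers that follow, take an optional *CreateOpts so that callers specify only the fields they care about while the helper injects a random name. A typical call site is sketched here:

// Accept all defaults; the helper picks a random "ACPTTEST" name.
project, err := CreateProject(t, client, nil)
th.AssertNoErr(t, err)
defer DeleteProject(t, client, project.ID)

// Or override just the fields a test needs, as TestProjectsDomain does further below.
iTrue := true
domainProject, err := CreateProject(t, client, &projects.CreateOpts{IsDomain: &iTrue})
th.AssertNoErr(t, err)
defer DeleteProject(t, client, domainProject.ID)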
+func CreateGroup(t *testing.T, client *gophercloud.ServiceClient, c *groups.CreateOpts) (*groups.Group, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create group: %s", name) + + var createOpts groups.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = groups.CreateOpts{} + } + + createOpts.Name = name + + group, err := groups.Create(client, createOpts).Extract() + if err != nil { + return group, err + } + + t.Logf("Successfully created group %s with ID %s", name, group.ID) + + th.AssertEquals(t, group.Name, name) + + return group, nil +} + +// CreateDomain will create a domain with a random name. +// It takes an optional createOpts parameter since creating a domain +// has many options. An error will be returned if the domain was +// unable to be created. +func CreateDomain(t *testing.T, client *gophercloud.ServiceClient, c *domains.CreateOpts) (*domains.Domain, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create domain: %s", name) + + var createOpts domains.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = domains.CreateOpts{} + } + + createOpts.Name = name + + domain, err := domains.Create(client, createOpts).Extract() + if err != nil { + return domain, err + } + + t.Logf("Successfully created domain %s with ID %s", name, domain.ID) + + th.AssertEquals(t, domain.Name, name) + + return domain, nil +} + +// CreateRole will create a role with a random name. +// It takes an optional createOpts parameter since creating a role +// has so many options. An error will be returned if the role was +// unable to be created. +func CreateRole(t *testing.T, client *gophercloud.ServiceClient, c *roles.CreateOpts) (*roles.Role, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create role: %s", name) + + var createOpts roles.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = roles.CreateOpts{} + } + + createOpts.Name = name + + role, err := roles.Create(client, createOpts).Extract() + if err != nil { + return role, err + } + + t.Logf("Successfully created role %s with ID %s", name, role.ID) + + th.AssertEquals(t, role.Name, name) + + return role, nil +} + +// CreateRegion will create a region with a random name. +// It takes an optional createOpts parameter since creating a region +// has so many options. An error will be returned if the region was +// unable to be created. +func CreateRegion(t *testing.T, client *gophercloud.ServiceClient, c *regions.CreateOpts) (*regions.Region, error) { + id := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create region: %s", id) + + var createOpts regions.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = regions.CreateOpts{} + } + + createOpts.ID = id + + region, err := regions.Create(client, createOpts).Extract() + if err != nil { + return region, err + } + + t.Logf("Successfully created region %s", id) + + th.AssertEquals(t, region.ID, id) + + return region, nil +} + +// CreateService will create a service with a random name. +// It takes an optional createOpts parameter since creating a service +// has so many options. An error will be returned if the service was +// unable to be created. 
+func CreateService(t *testing.T, client *gophercloud.ServiceClient, c *services.CreateOpts) (*services.Service, error) { + name := tools.RandomString("ACPTTEST", 8) + t.Logf("Attempting to create service: %s", name) + + var createOpts services.CreateOpts + if c != nil { + createOpts = *c + } else { + createOpts = services.CreateOpts{} + } + + createOpts.Extra["name"] = name + + service, err := services.Create(client, createOpts).Extract() + if err != nil { + return service, err + } + + t.Logf("Successfully created service %s", service.ID) + + th.AssertEquals(t, service.Extra["name"], name) + + return service, nil +} + +// DeleteProject will delete a project by ID. A fatal error will occur if +// the project ID failed to be deleted. This works best when using it as +// a deferred function. +func DeleteProject(t *testing.T, client *gophercloud.ServiceClient, projectID string) { + err := projects.Delete(client, projectID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete project %s: %v", projectID, err) + } + + t.Logf("Deleted project: %s", projectID) +} + +// DeleteUser will delete a user by ID. A fatal error will occur if +// the user failed to be deleted. This works best when using it as +// a deferred function. +func DeleteUser(t *testing.T, client *gophercloud.ServiceClient, userID string) { + err := users.Delete(client, userID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete user with ID %s: %v", userID, err) + } + + t.Logf("Deleted user with ID: %s", userID) +} + +// DeleteGroup will delete a group by ID. A fatal error will occur if +// the group failed to be deleted. This works best when using it as +// a deferred function. +func DeleteGroup(t *testing.T, client *gophercloud.ServiceClient, groupID string) { + err := groups.Delete(client, groupID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete group %s: %v", groupID, err) + } + + t.Logf("Deleted group: %s", groupID) +} + +// DeleteDomain will delete a domain by ID. A fatal error will occur if +// the project ID failed to be deleted. This works best when using it as +// a deferred function. +func DeleteDomain(t *testing.T, client *gophercloud.ServiceClient, domainID string) { + err := domains.Delete(client, domainID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete domain %s: %v", domainID, err) + } + + t.Logf("Deleted domain: %s", domainID) +} + +// DeleteRole will delete a role by ID. A fatal error will occur if +// the role failed to be deleted. This works best when using it as +// a deferred function. +func DeleteRole(t *testing.T, client *gophercloud.ServiceClient, roleID string) { + err := roles.Delete(client, roleID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete role %s: %v", roleID, err) + } + + t.Logf("Deleted role: %s", roleID) +} + +// DeleteRegion will delete a reg by ID. A fatal error will occur if +// the region failed to be deleted. This works best when using it as +// a deferred function. +func DeleteRegion(t *testing.T, client *gophercloud.ServiceClient, regionID string) { + err := regions.Delete(client, regionID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete region %s: %v", regionID, err) + } + + t.Logf("Deleted region: %s", regionID) +} + +// DeleteService will delete a reg by ID. A fatal error will occur if +// the service failed to be deleted. This works best when using it as +// a deferred function. 
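CreateService above writes the generated name into createOpts.Extra. When the caller passes nil (or a CreateOpts whose Extra map is unset), that map is nil and the assignment would panic, so a defensive version of that step is sketched here:

// Sketch: initialise the Extra map before writing the generated name into it.
if createOpts.Extra == nil {
	createOpts.Extra = map[string]interface{}{}
}
createOpts.Extra["name"] = name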
+func DeleteService(t *testing.T, client *gophercloud.ServiceClient, serviceID string) { + err := services.Delete(client, serviceID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete service %s: %v", serviceID, err) + } + + t.Logf("Deleted service: %s", serviceID) +} + +// UnassignRole will delete a role assigned to a user/group on a project/domain +// A fatal error will occur if it fails to delete the assignment. +// This works best when using it as a deferred function. +func UnassignRole(t *testing.T, client *gophercloud.ServiceClient, roleID string, opts *roles.UnassignOpts) { + err := roles.Unassign(client, roleID, *opts).ExtractErr() + if err != nil { + t.Fatalf("Unable to unassign a role %v on context %+v: %v", roleID, *opts, err) + } + t.Logf("Unassigned the role %v on context %+v", roleID, *opts) +} + +// FindRole finds all roles that the current authenticated client has access +// to and returns the first one found. An error will be returned if the lookup +// was unsuccessful. +func FindRole(t *testing.T, client *gophercloud.ServiceClient) (*roles.Role, error) { + t.Log("Attempting to find a role") + var role *roles.Role + + allPages, err := roles.List(client, nil).AllPages() + if err != nil { + return nil, err + } + + allRoles, err := roles.ExtractRoles(allPages) + if err != nil { + return nil, err + } + + for _, r := range allRoles { + role = &r + break + } + + t.Logf("Successfully found a role %s with ID %s", role.Name, role.ID) + + return role, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/pkg.go new file mode 100644 index 000000000000..eac3ae96a1ff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/pkg.go @@ -0,0 +1 @@ +package v3 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/policies_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/policies_test.go new file mode 100644 index 000000000000..3fb22bb22174 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/policies_test.go @@ -0,0 +1,143 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/policies" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestPoliciesList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + allPages, err := policies.List(client, policies.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err := policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + + for _, policy := range allPolicies { + tools.PrintResource(t, policy) + } +} + +func TestPoliciesCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := policies.CreateOpts{ + Type: "application/json", + Blob: []byte("{'foobar_user': 'role:compute-user'}"), + Extra: map[string]interface{}{ + "description": "policy for foobar_user", + }, + } + + policy, err := policies.Create(client, &createOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, policy) + tools.PrintResource(t, policy.Extra) + + th.AssertEquals(t, policy.Type, createOpts.Type) + th.AssertEquals(t, policy.Blob, 
string(createOpts.Blob)) + th.AssertEquals(t, policy.Extra["description"], createOpts.Extra["description"]) + + var listOpts policies.ListOpts + + allPages, err := policies.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err := policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, p := range allPolicies { + tools.PrintResource(t, p) + tools.PrintResource(t, p.Extra) + + if p.ID == policy.ID { + found = true + } + } + + th.AssertEquals(t, true, found) + + listOpts.Filters = map[string]string{ + "type__contains": "json", + } + + allPages, err = policies.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err = policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + + found = false + for _, p := range allPolicies { + tools.PrintResource(t, p) + tools.PrintResource(t, p.Extra) + + if p.ID == policy.ID { + found = true + } + } + + th.AssertEquals(t, true, found) + + listOpts.Filters = map[string]string{ + "type__contains": "foobar", + } + + allPages, err = policies.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allPolicies, err = policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + + found = false + for _, p := range allPolicies { + tools.PrintResource(t, p) + tools.PrintResource(t, p.Extra) + + if p.ID == policy.ID { + found = true + } + } + + th.AssertEquals(t, false, found) + + gotPolicy, err := policies.Get(client, policy.ID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, policy, gotPolicy) + + updateOpts := policies.UpdateOpts{ + Type: "text/plain", + Blob: []byte("'foobar_user': 'role:compute-user'"), + Extra: map[string]interface{}{ + "description": "updated policy for foobar_user", + }, + } + + updatedPolicy, err := policies.Update(client, policy.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, updatedPolicy) + tools.PrintResource(t, updatedPolicy.Extra) + + th.AssertEquals(t, updatedPolicy.Type, updateOpts.Type) + th.AssertEquals(t, updatedPolicy.Blob, string(updateOpts.Blob)) + th.AssertEquals(t, updatedPolicy.Extra["description"], updateOpts.Extra["description"]) + + err = policies.Delete(client, policy.ID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/projects_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/projects_test.go new file mode 100644 index 000000000000..7256b80d81ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/projects_test.go @@ -0,0 +1,192 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestProjectsList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + var iTrue bool = true + listOpts := projects.ListOpts{ + Enabled: &iTrue, + } + + allPages, err := projects.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allProjects, err := projects.ExtractProjects(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, project := range allProjects { + tools.PrintResource(t, project) + + if project.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = 
map[string]string{ + "name__contains": "dmi", + } + + allPages, err = projects.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allProjects, err = projects.ExtractProjects(allPages) + th.AssertNoErr(t, err) + + found = false + for _, project := range allProjects { + tools.PrintResource(t, project) + + if project.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = map[string]string{ + "name__contains": "foo", + } + + allPages, err = projects.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allProjects, err = projects.ExtractProjects(allPages) + th.AssertNoErr(t, err) + + found = false + for _, project := range allProjects { + tools.PrintResource(t, project) + + if project.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, false) +} + +func TestProjectsGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + allPages, err := projects.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allProjects, err := projects.ExtractProjects(allPages) + th.AssertNoErr(t, err) + + project := allProjects[0] + p, err := projects.Get(client, project.ID).Extract() + if err != nil { + t.Fatalf("Unable to get project: %v", err) + } + + tools.PrintResource(t, p) + + th.AssertEquals(t, project.Name, p.Name) +} + +func TestProjectsCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + tools.PrintResource(t, project) + + var iFalse bool = false + updateOpts := projects.UpdateOpts{ + Enabled: &iFalse, + } + + updatedProject, err := projects.Update(client, project.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, updatedProject) +} + +func TestProjectsDomain(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + var iTrue = true + createOpts := projects.CreateOpts{ + IsDomain: &iTrue, + } + + projectDomain, err := CreateProject(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, projectDomain.ID) + + tools.PrintResource(t, projectDomain) + + createOpts = projects.CreateOpts{ + DomainID: projectDomain.ID, + } + + project, err := CreateProject(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + tools.PrintResource(t, project) + + th.AssertEquals(t, project.DomainID, projectDomain.ID) + + var iFalse = false + updateOpts := projects.UpdateOpts{ + Enabled: &iFalse, + } + + _, err = projects.Update(client, projectDomain.ID, updateOpts).Extract() + th.AssertNoErr(t, err) +} + +func TestProjectsNested(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + projectMain, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, projectMain.ID) + + tools.PrintResource(t, projectMain) + + createOpts := projects.CreateOpts{ + ParentID: projectMain.ID, + } + + project, err := CreateProject(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + tools.PrintResource(t, project) + + th.AssertEquals(t, project.ParentID, projectMain.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/regions_test.go 
b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/regions_test.go new file mode 100644 index 000000000000..f44a65be8bcf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/regions_test.go @@ -0,0 +1,96 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/regions" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestRegionsList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + listOpts := regions.ListOpts{ + ParentRegionID: "RegionOne", + } + + allPages, err := regions.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allRegions, err := regions.ExtractRegions(allPages) + th.AssertNoErr(t, err) + + for _, region := range allRegions { + tools.PrintResource(t, region) + } +} + +func TestRegionsGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + allPages, err := regions.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allRegions, err := regions.ExtractRegions(allPages) + th.AssertNoErr(t, err) + + region := allRegions[0] + p, err := regions.Get(client, region.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, p) + + th.AssertEquals(t, region.ID, p.ID) +} + +func TestRegionsCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := regions.CreateOpts{ + ID: "testregion", + Description: "Region for testing", + Extra: map[string]interface{}{ + "email": "testregion@example.com", + }, + } + + // Create region in the default domain + region, err := CreateRegion(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteRegion(t, client, region.ID) + + tools.PrintResource(t, region) + tools.PrintResource(t, region.Extra) + + updateOpts := regions.UpdateOpts{ + Description: "Region A for testing", + /* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // The following lines should be uncommented once the fix is merged. 
+ + Extra: map[string]interface{}{ + "email": "testregionA@example.com", + }, + */ + } + + newRegion, err := regions.Update(client, region.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newRegion) + tools.PrintResource(t, newRegion.Extra) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/roles_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/roles_test.go new file mode 100644 index 000000000000..c9a395f8329c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/roles_test.go @@ -0,0 +1,657 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/domains" + "github.com/gophercloud/gophercloud/openstack/identity/v3/roles" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestRolesList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + listOpts := roles.ListOpts{ + DomainID: "default", + } + + allPages, err := roles.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + for _, role := range allRoles { + tools.PrintResource(t, role) + } +} + +func TestRolesGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + p, err := roles.Get(client, role.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, p) +} + +func TestRolesCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := roles.CreateOpts{ + Name: "testrole", + Extra: map[string]interface{}{ + "description": "test role description", + }, + } + + // Create Role in the default domain + role, err := CreateRole(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteRole(t, client, role.ID) + + tools.PrintResource(t, role) + tools.PrintResource(t, role.Extra) + + var listOpts roles.ListOpts + allPages, err := roles.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, r := range allRoles { + tools.PrintResource(t, r) + tools.PrintResource(t, r.Extra) + + if r.Name == role.Name { + found = true + } + } + + th.AssertEquals(t, found, true) + + updateOpts := roles.UpdateOpts{ + Extra: map[string]interface{}{ + "description": "updated test role description", + }, + } + + newRole, err := roles.Update(client, role.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newRole) + tools.PrintResource(t, newRole.Extra) + + th.AssertEquals(t, newRole.Extra["description"], "updated test role description") +} + +func TestRolesFilterList(t *testing.T) { + clients.RequireAdmin(t) + + // For some reason this is not longer working. + // It might be a temporary issue. 
+ clients.SkipRelease(t, "master") + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := roles.CreateOpts{ + Name: "testrole", + Extra: map[string]interface{}{ + "description": "test role description", + }, + } + + // Create Role in the default domain + role, err := CreateRole(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteRole(t, client, role.ID) + + var listOpts roles.ListOpts + listOpts.Filters = map[string]string{ + "name__contains": "TEST", + } + + allPages, err := roles.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + found := false + for _, r := range allRoles { + tools.PrintResource(t, r) + tools.PrintResource(t, r.Extra) + + if r.Name == role.Name { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = map[string]string{ + "name__contains": "reader", + } + + allPages, err = roles.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err = roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + found = false + for _, r := range allRoles { + tools.PrintResource(t, r) + tools.PrintResource(t, r.Extra) + + if r.Name == role.Name { + found = true + } + } + + th.AssertEquals(t, found, false) +} + +func TestRoleListAssignmentForUserOnProject(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + user, err := CreateUser(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + t.Logf("Attempting to assign a role %s to a user %s on a project %s", + role.Name, user.Name, project.Name) + + assignOpts := roles.AssignOpts{ + UserID: user.ID, + ProjectID: project.ID, + } + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a user %s on a project %s", + role.Name, user.Name, project.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + UserID: user.ID, + ProjectID: project.ID, + }) + + listAssignmentsOnResourceOpts := roles.ListAssignmentsOnResourceOpts{ + UserID: user.ID, + ProjectID: project.ID, + } + allPages, err := roles.ListAssignmentsOnResource(client, listAssignmentsOnResourceOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of user %s on project %s:", user.Name, project.Name) + var found bool + for _, _role := range allRoles { + tools.PrintResource(t, _role) + + if _role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRoleListAssignmentForUserOnDomain(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + domain, err := CreateDomain(t, client, &domains.CreateOpts{ + Enabled: gophercloud.Disabled, + }) + th.AssertNoErr(t, err) + defer DeleteDomain(t, client, domain.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + user, err := CreateUser(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + t.Logf("Attempting to assign a role %s to a user %s on a domain %s", + role.Name, user.Name, domain.Name) + + assignOpts := roles.AssignOpts{ + UserID: user.ID, + DomainID: domain.ID, + } + + err = 
roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a user %s on a domain %s", + role.Name, user.Name, domain.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + UserID: user.ID, + DomainID: domain.ID, + }) + + listAssignmentsOnResourceOpts := roles.ListAssignmentsOnResourceOpts{ + UserID: user.ID, + DomainID: domain.ID, + } + allPages, err := roles.ListAssignmentsOnResource(client, listAssignmentsOnResourceOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of user %s on domain %s:", user.Name, domain.Name) + var found bool + for _, _role := range allRoles { + tools.PrintResource(t, _role) + + if _role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRoleListAssignmentForGroupOnProject(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + group, err := CreateGroup(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteGroup(t, client, group.ID) + + t.Logf("Attempting to assign a role %s to a group %s on a project %s", + role.Name, group.Name, project.Name) + + assignOpts := roles.AssignOpts{ + GroupID: group.ID, + ProjectID: project.ID, + } + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a group %s on a project %s", + role.Name, group.Name, project.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + GroupID: group.ID, + ProjectID: project.ID, + }) + + listAssignmentsOnResourceOpts := roles.ListAssignmentsOnResourceOpts{ + GroupID: group.ID, + ProjectID: project.ID, + } + allPages, err := roles.ListAssignmentsOnResource(client, listAssignmentsOnResourceOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of group %s on project %s:", group.Name, project.Name) + var found bool + for _, _role := range allRoles { + tools.PrintResource(t, _role) + + if _role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRoleListAssignmentForGroupOnDomain(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + domain, err := CreateDomain(t, client, &domains.CreateOpts{ + Enabled: gophercloud.Disabled, + }) + th.AssertNoErr(t, err) + defer DeleteDomain(t, client, domain.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + group, err := CreateGroup(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteGroup(t, client, group.ID) + + t.Logf("Attempting to assign a role %s to a group %s on a domain %s", + role.Name, group.Name, domain.Name) + + assignOpts := roles.AssignOpts{ + GroupID: group.ID, + DomainID: domain.ID, + } + + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a group %s on a domain %s", + role.Name, group.Name, domain.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + GroupID: group.ID, + DomainID: domain.ID, + }) + + listAssignmentsOnResourceOpts := roles.ListAssignmentsOnResourceOpts{ + GroupID: group.ID, + 
DomainID: domain.ID, + } + allPages, err := roles.ListAssignmentsOnResource(client, listAssignmentsOnResourceOpts).AllPages() + th.AssertNoErr(t, err) + + allRoles, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of group %s on domain %s:", group.Name, domain.Name) + var found bool + for _, _role := range allRoles { + tools.PrintResource(t, _role) + + if _role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRolesAssignToUserOnProject(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + user, err := CreateUser(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + t.Logf("Attempting to assign a role %s to a user %s on a project %s", + role.Name, user.Name, project.Name) + + assignOpts := roles.AssignOpts{ + UserID: user.ID, + ProjectID: project.ID, + } + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a user %s on a project %s", + role.Name, user.Name, project.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + UserID: user.ID, + ProjectID: project.ID, + }) + + lao := roles.ListAssignmentsOpts{ + RoleID: role.ID, + ScopeProjectID: project.ID, + UserID: user.ID, + } + + allPages, err := roles.ListAssignments(client, lao).AllPages() + th.AssertNoErr(t, err) + + allRoleAssignments, err := roles.ExtractRoleAssignments(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of user %s on project %s:", user.Name, project.Name) + var found bool + for _, roleAssignment := range allRoleAssignments { + tools.PrintResource(t, roleAssignment) + + if roleAssignment.Role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRolesAssignToUserOnDomain(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + domain, err := CreateDomain(t, client, &domains.CreateOpts{ + Enabled: gophercloud.Disabled, + }) + th.AssertNoErr(t, err) + defer DeleteDomain(t, client, domain.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + user, err := CreateUser(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + t.Logf("Attempting to assign a role %s to a user %s on a domain %s", + role.Name, user.Name, domain.Name) + + assignOpts := roles.AssignOpts{ + UserID: user.ID, + DomainID: domain.ID, + } + + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a user %s on a domain %s", + role.Name, user.Name, domain.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + UserID: user.ID, + DomainID: domain.ID, + }) + + lao := roles.ListAssignmentsOpts{ + RoleID: role.ID, + ScopeDomainID: domain.ID, + UserID: user.ID, + } + + allPages, err := roles.ListAssignments(client, lao).AllPages() + th.AssertNoErr(t, err) + + allRoleAssignments, err := roles.ExtractRoleAssignments(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of user %s on domain %s:", user.Name, domain.Name) + var found bool + for _, roleAssignment := range allRoleAssignments { + tools.PrintResource(t, roleAssignment) + + if roleAssignment.Role.ID == 
role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRolesAssignToGroupOnDomain(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + domain, err := CreateDomain(t, client, &domains.CreateOpts{ + Enabled: gophercloud.Disabled, + }) + th.AssertNoErr(t, err) + defer DeleteDomain(t, client, domain.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + group, err := CreateGroup(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteGroup(t, client, group.ID) + + t.Logf("Attempting to assign a role %s to a group %s on a domain %s", + role.Name, group.Name, domain.Name) + + assignOpts := roles.AssignOpts{ + GroupID: group.ID, + DomainID: domain.ID, + } + + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a group %s on a domain %s", + role.Name, group.Name, domain.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + GroupID: group.ID, + DomainID: domain.ID, + }) + + lao := roles.ListAssignmentsOpts{ + RoleID: role.ID, + ScopeDomainID: domain.ID, + GroupID: group.ID, + } + + allPages, err := roles.ListAssignments(client, lao).AllPages() + th.AssertNoErr(t, err) + + allRoleAssignments, err := roles.ExtractRoleAssignments(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of group %s on domain %s:", group.Name, domain.Name) + var found bool + for _, roleAssignment := range allRoleAssignments { + tools.PrintResource(t, roleAssignment) + + if roleAssignment.Role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestRolesAssignToGroupOnProject(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + role, err := FindRole(t, client) + th.AssertNoErr(t, err) + + group, err := CreateGroup(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteGroup(t, client, group.ID) + + t.Logf("Attempting to assign a role %s to a group %s on a project %s", + role.Name, group.Name, project.Name) + + assignOpts := roles.AssignOpts{ + GroupID: group.ID, + ProjectID: project.ID, + } + err = roles.Assign(client, role.ID, assignOpts).ExtractErr() + th.AssertNoErr(t, err) + + t.Logf("Successfully assigned a role %s to a group %s on a project %s", + role.Name, group.Name, project.Name) + + defer UnassignRole(t, client, role.ID, &roles.UnassignOpts{ + GroupID: group.ID, + ProjectID: project.ID, + }) + + lao := roles.ListAssignmentsOpts{ + RoleID: role.ID, + ScopeProjectID: project.ID, + GroupID: group.ID, + } + + allPages, err := roles.ListAssignments(client, lao).AllPages() + th.AssertNoErr(t, err) + + allRoleAssignments, err := roles.ExtractRoleAssignments(allPages) + th.AssertNoErr(t, err) + + t.Logf("Role assignments of group %s on project %s:", group.Name, project.Name) + var found bool + for _, roleAssignment := range allRoleAssignments { + tools.PrintResource(t, roleAssignment) + + if roleAssignment.Role.ID == role.ID { + found = true + } + } + + th.AssertEquals(t, found, true) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/service_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/service_test.go new file mode 100644 index 000000000000..7e072ce3a43e --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/service_test.go @@ -0,0 +1,78 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestServicesList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + listOpts := services.ListOpts{ + ServiceType: "identity", + } + + allPages, err := services.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allServices, err := services.ExtractServices(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, service := range allServices { + tools.PrintResource(t, service) + + if service.Type == "identity" { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestServicesCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := services.CreateOpts{ + Type: "testing", + Extra: map[string]interface{}{ + "email": "testservice@example.com", + }, + } + + // Create service in the default domain + service, err := CreateService(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteService(t, client, service.ID) + + tools.PrintResource(t, service) + tools.PrintResource(t, service.Extra) + + updateOpts := services.UpdateOpts{ + Type: "testing2", + Extra: map[string]interface{}{ + "description": "Test Users", + "email": "thetestservice@example.com", + }, + } + + newService, err := services.Update(client, service.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newService) + tools.PrintResource(t, newService.Extra) + + th.AssertEquals(t, newService.Extra["description"], "Test Users") +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/token_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/token_test.go new file mode 100644 index 000000000000..ff6e91d49f2a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/token_test.go @@ -0,0 +1,49 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTokensGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + authOptions := tokens.AuthOptions{ + Username: ao.Username, + Password: ao.Password, + DomainName: "default", + } + + token, err := tokens.Create(client, &authOptions).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, token) + + catalog, err := tokens.Get(client, token.ID).ExtractServiceCatalog() + th.AssertNoErr(t, err) + tools.PrintResource(t, catalog) + + user, err := tokens.Get(client, token.ID).ExtractUser() + th.AssertNoErr(t, err) + tools.PrintResource(t, user) + + roles, err := tokens.Get(client, token.ID).ExtractRoles() + th.AssertNoErr(t, err) + tools.PrintResource(t, roles) + + project, err := tokens.Get(client, token.ID).ExtractProject() + 
th.AssertNoErr(t, err) + tools.PrintResource(t, project) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/users_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/users_test.go new file mode 100644 index 000000000000..5bf2fcb5fbad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3/users_test.go @@ -0,0 +1,334 @@ +// +build acceptance + +package v3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/users" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestUsersList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + var iTrue bool = true + listOpts := users.ListOpts{ + Enabled: &iTrue, + } + + allPages, err := users.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allUsers, err := users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, user := range allUsers { + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + if user.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = map[string]string{ + "name__contains": "dmi", + } + + allPages, err = users.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allUsers, err = users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + + found = false + for _, user := range allUsers { + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + if user.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, true) + + listOpts.Filters = map[string]string{ + "name__contains": "foo", + } + + allPages, err = users.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allUsers, err = users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + + found = false + for _, user := range allUsers { + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + if user.Name == "admin" { + found = true + } + } + + th.AssertEquals(t, found, false) +} + +func TestUsersGet(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + allPages, err := users.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allUsers, err := users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + + user := allUsers[0] + p, err := users.Get(client, user.ID).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, p) + + th.AssertEquals(t, user.Name, p.Name) +} + +func TestUserCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + project, err := CreateProject(t, client, nil) + th.AssertNoErr(t, err) + defer DeleteProject(t, client, project.ID) + + tools.PrintResource(t, project) + + createOpts := users.CreateOpts{ + DefaultProjectID: project.ID, + Password: "foobar", + DomainID: "default", + Options: map[users.Option]interface{}{ + users.IgnorePasswordExpiry: true, + users.MultiFactorAuthRules: []interface{}{ + []string{"password", "totp"}, + []string{"password", "custom-auth-method"}, + }, + }, + Extra: map[string]interface{}{ + "email": "jsmith@example.com", + }, + } + + user, err := CreateUser(t, client, &createOpts) 
+ th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + iFalse := false + updateOpts := users.UpdateOpts{ + Enabled: &iFalse, + Options: map[users.Option]interface{}{ + users.MultiFactorAuthRules: nil, + }, + Extra: map[string]interface{}{ + "disabled_reason": "DDOS", + }, + } + + newUser, err := users.Update(client, user.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newUser) + tools.PrintResource(t, newUser.Extra) + + th.AssertEquals(t, newUser.Extra["disabled_reason"], "DDOS") +} + +func TestUserChangePassword(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := users.CreateOpts{ + Password: "secretsecret", + DomainID: "default", + } + + user, err := CreateUser(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + changePasswordOpts := users.ChangePasswordOpts{ + OriginalPassword: "secretsecret", + Password: "new_secretsecret", + } + err = users.ChangePassword(client, user.ID, changePasswordOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUsersGroups(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + createOpts := users.CreateOpts{ + Password: "foobar", + DomainID: "default", + } + + user, err := CreateUser(t, client, &createOpts) + th.AssertNoErr(t, err) + defer DeleteUser(t, client, user.ID) + + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + createGroupOpts := groups.CreateOpts{ + Name: "testgroup", + DomainID: "default", + } + + // Create Group in the default domain + group, err := CreateGroup(t, client, &createGroupOpts) + th.AssertNoErr(t, err) + defer DeleteGroup(t, client, group.ID) + + tools.PrintResource(t, group) + tools.PrintResource(t, group.Extra) + + err = users.AddToGroup(client, group.ID, user.ID).ExtractErr() + th.AssertNoErr(t, err) + + allGroupPages, err := users.ListGroups(client, user.ID).AllPages() + th.AssertNoErr(t, err) + + allGroups, err := groups.ExtractGroups(allGroupPages) + th.AssertNoErr(t, err) + + var found bool + for _, g := range allGroups { + tools.PrintResource(t, g) + tools.PrintResource(t, g.Extra) + + if g.ID == group.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + found = false + allUserPages, err := users.ListInGroup(client, group.ID, nil).AllPages() + th.AssertNoErr(t, err) + + allUsers, err := users.ExtractUsers(allUserPages) + th.AssertNoErr(t, err) + + for _, u := range allUsers { + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + if u.ID == user.ID { + found = true + } + } + + th.AssertEquals(t, found, true) + + ok, err := users.IsMemberOfGroup(client, group.ID, user.ID).Extract() + if err != nil { + t.Fatalf("Unable to check whether user belongs to group: %v", err) + } + if !ok { + t.Fatalf("User %s is expected to be a member of group %s", user.ID, group.ID) + } + + err = users.RemoveFromGroup(client, group.ID, user.ID).ExtractErr() + th.AssertNoErr(t, err) + + allGroupPages, err = users.ListGroups(client, user.ID).AllPages() + th.AssertNoErr(t, err) + + allGroups, err = groups.ExtractGroups(allGroupPages) + th.AssertNoErr(t, err) + + found = false + for _, g := range allGroups { + tools.PrintResource(t, g) + tools.PrintResource(t, g.Extra) + + if g.ID == group.ID { + found = true + } + } + + 
th.AssertEquals(t, found, false) + + found = false + allUserPages, err = users.ListInGroup(client, group.ID, nil).AllPages() + th.AssertNoErr(t, err) + + allUsers, err = users.ExtractUsers(allUserPages) + th.AssertNoErr(t, err) + + for _, u := range allUsers { + tools.PrintResource(t, user) + tools.PrintResource(t, user.Extra) + + if u.ID == user.ID { + found = true + } + } + + th.AssertEquals(t, found, false) + +} + +func TestUsersListProjects(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewIdentityV3Client() + th.AssertNoErr(t, err) + + allUserPages, err := users.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allUsers, err := users.ExtractUsers(allUserPages) + th.AssertNoErr(t, err) + + user := allUsers[0] + + allProjectPages, err := users.ListProjects(client, user.ID).AllPages() + th.AssertNoErr(t, err) + + allProjects, err := projects.ExtractProjects(allProjectPages) + th.AssertNoErr(t, err) + + for _, project := range allProjects { + tools.PrintResource(t, project) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imagedata_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imagedata_test.go new file mode 100644 index 000000000000..d19c38d4a9e6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imagedata_test.go @@ -0,0 +1,29 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestImageStage(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + image, err := CreateEmptyImage(t, client) + th.AssertNoErr(t, err) + defer DeleteImage(t, client, image) + + imageFileName := tools.RandomString("image_", 8) + imageFilepath := "/tmp/" + imageFileName + imageURL := ImportImageURL + + err = DownloadImageFileFromURL(t, imageURL, imageFilepath) + th.AssertNoErr(t, err) + defer DeleteImageFile(t, imageFilepath) + + err = StageImage(t, client, imageFilepath, image.ID) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imageimport_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imageimport_test.go new file mode 100644 index 000000000000..6d746046768d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imageimport_test.go @@ -0,0 +1,21 @@ +// +build acceptance imageservice imageimport + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestGetImportInfo(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + importInfo, err := GetImportInfo(t, client) + th.AssertNoErr(t, err) + + tools.PrintResource(t, importInfo) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/images_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/images_test.go new file mode 100644 index 000000000000..0fd657b98c0b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/images_test.go @@ -0,0 +1,182 @@ +// +build acceptance imageservice images + +package v2 + +import ( + 
"testing" + "time" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestImagesListEachPage(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + listOpts := images.ListOpts{ + Limit: 1, + } + + pager := images.List(client, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + images, err := images.ExtractImages(page) + if err != nil { + t.Fatalf("Unable to extract images: %v", err) + } + + for _, image := range images { + tools.PrintResource(t, image) + tools.PrintResource(t, image.Properties) + } + + return true, nil + }) +} + +func TestImagesListAllPages(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + image, err := CreateEmptyImage(t, client) + th.AssertNoErr(t, err) + defer DeleteImage(t, client, image) + + listOpts := images.ListOpts{} + + allPages, err := images.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allImages, err := images.ExtractImages(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, i := range allImages { + tools.PrintResource(t, i) + tools.PrintResource(t, i.Properties) + + if i.Name == image.Name { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestImagesListByDate(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + date := time.Date(2014, 1, 1, 1, 1, 1, 0, time.UTC) + listOpts := images.ListOpts{ + Limit: 1, + CreatedAtQuery: &images.ImageDateQuery{ + Date: date, + Filter: images.FilterGTE, + }, + } + + allPages, err := images.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allImages, err := images.ExtractImages(allPages) + th.AssertNoErr(t, err) + + if len(allImages) == 0 { + t.Fatalf("Query resulted in no results") + } + + for _, image := range allImages { + tools.PrintResource(t, image) + tools.PrintResource(t, image.Properties) + } + + date = time.Date(2049, 1, 1, 1, 1, 1, 0, time.UTC) + listOpts = images.ListOpts{ + Limit: 1, + CreatedAtQuery: &images.ImageDateQuery{ + Date: date, + Filter: images.FilterGTE, + }, + } + + allPages, err = images.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allImages, err = images.ExtractImages(allPages) + th.AssertNoErr(t, err) + + if len(allImages) > 0 { + t.Fatalf("Expected 0 images, got %d", len(allImages)) + } +} + +func TestImagesFilter(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + image, err := CreateEmptyImage(t, client) + th.AssertNoErr(t, err) + defer DeleteImage(t, client, image) + + listOpts := images.ListOpts{ + Tags: []string{"foo", "bar"}, + ContainerFormat: "bare", + DiskFormat: "qcow2", + } + + allPages, err := images.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allImages, err := images.ExtractImages(allPages) + th.AssertNoErr(t, err) + + if len(allImages) == 0 { + t.Fatalf("Query resulted in no results") + } +} + +func TestImagesUpdate(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + image, err := CreateEmptyImage(t, client) + th.AssertNoErr(t, err) + defer DeleteImage(t, client, image) + + newTags := []string{"foo", "bar"} + + updateOpts := images.UpdateOpts{ + images.ReplaceImageName{NewName: image.Name + "foo"}, + 
images.ReplaceImageTags{NewTags: newTags}, + images.UpdateImageProperty{ + Op: images.AddOp, + Name: "hw_disk_bus", + Value: "scsi", + }, + images.UpdateImageProperty{ + Op: images.RemoveOp, + Name: "architecture", + }, + } + + newImage, err := images.Update(client, image.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + + tools.PrintResource(t, newImage) + tools.PrintResource(t, newImage.Properties) + + th.AssertEquals(t, newImage.Name, image.Name+"foo") + th.AssertDeepEquals(t, newImage.Tags, newTags) + + // Because OpenStack is now adding additional properties automatically, + // it's not possible to do an easy AssertDeepEquals. + th.AssertEquals(t, newImage.Properties["hw_disk_bus"], "scsi") + + if _, ok := newImage.Properties["architecture"]; ok { + t.Fatal("architecture property still exists") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imageservice.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imageservice.go new file mode 100644 index 000000000000..e8f153d2e045 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/imageservice.go @@ -0,0 +1,159 @@ +// Package v2 contains common functions for creating imageservice resources +// for use in acceptance tests. See the `*_test.go` files for example usages. +package v2 + +import ( + "io" + "net/http" + "os" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// CreateEmptyImage will create an image, but with no actual image data. +// An error will be returned if an image was unable to be created. +func CreateEmptyImage(t *testing.T, client *gophercloud.ServiceClient) (*images.Image, error) { + var image *images.Image + + name := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create image: %s", name) + + protected := false + visibility := images.ImageVisibilityPrivate + createOpts := &images.CreateOpts{ + Name: name, + ContainerFormat: "bare", + DiskFormat: "qcow2", + MinDisk: 0, + MinRAM: 0, + Protected: &protected, + Visibility: &visibility, + Properties: map[string]string{ + "architecture": "x86_64", + }, + Tags: []string{"foo", "bar", "baz"}, + } + + image, err := images.Create(client, createOpts).Extract() + if err != nil { + return image, err + } + + newImage, err := images.Get(client, image.ID).Extract() + if err != nil { + return image, err + } + + t.Logf("Created image %s: %#v", name, newImage) + + th.CheckEquals(t, newImage.Name, name) + th.CheckEquals(t, newImage.Properties["architecture"], "x86_64") + return newImage, nil +} + +// DeleteImage deletes an image. +// A fatal error will occur if the image failed to delete. This works best when +// used as a deferred function. +func DeleteImage(t *testing.T, client *gophercloud.ServiceClient, image *images.Image) { + err := images.Delete(client, image.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete image %s: %v", image.ID, err) + } + + t.Logf("Deleted image: %s", image.ID) +} + +// ImportImageURL contains an URL of a test image that can be imported. 
+const ImportImageURL = "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img"
+
+// CreateTask will create a task to import the CirrOS image.
+// An error will be returned if a task couldn't be created.
+func CreateTask(t *testing.T, client *gophercloud.ServiceClient, imageURL string) (*tasks.Task, error) {
+	t.Logf("Attempting to create an Imageservice import task with image: %s", imageURL)
+	opts := tasks.CreateOpts{
+		Type: "import",
+		Input: map[string]interface{}{
+			"image_properties": map[string]interface{}{
+				"container_format": "bare",
+				"disk_format": "raw",
+			},
+			"import_from_format": "raw",
+			"import_from": imageURL,
+		},
+	}
+	task, err := tasks.Create(client, opts).Extract()
+	if err != nil {
+		return nil, err
+	}
+
+	newTask, err := tasks.Get(client, task.ID).Extract()
+	if err != nil {
+		return nil, err
+	}
+
+	return newTask, nil
+}
+
+// GetImportInfo will retrieve Import API information.
+func GetImportInfo(t *testing.T, client *gophercloud.ServiceClient) (*imageimport.ImportInfo, error) {
+	t.Log("Attempting to get the Imageservice Import API information")
+	importInfo, err := imageimport.Get(client).Extract()
+	if err != nil {
+		return nil, err
+	}
+
+	return importInfo, nil
+}
+
+// StageImage will stage a local image file to the referenced remote queued image.
+func StageImage(t *testing.T, client *gophercloud.ServiceClient, filepath, imageID string) error {
+	imageData, err := os.Open(filepath)
+	if err != nil {
+		return err
+	}
+	defer imageData.Close()
+
+	return imagedata.Stage(client, imageID, imageData).ExtractErr()
+}
+
+// DownloadImageFileFromURL will download an image from the specified URL and
+// place it into the specified path.
+func DownloadImageFileFromURL(t *testing.T, url, filepath string) error {
+	file, err := os.Create(filepath)
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+
+	t.Logf("Attempting to download image from %s", url)
+	resp, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	defer resp.Body.Close()
+
+	size, err := io.Copy(file, resp.Body)
+	if err != nil {
+		return err
+	}
+
+	t.Logf("Downloaded image with size of %d bytes in %s", size, filepath)
+	return nil
+}
+
+// DeleteImageFile will delete a local image file.
+func DeleteImageFile(t *testing.T, filepath string) { + err := os.Remove(filepath) + if err != nil { + t.Fatalf("Unable to delete image file %s", filepath) + } + + t.Logf("Successfully deleted image file %s", filepath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/tasks_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/tasks_test.go new file mode 100644 index 000000000000..1e0fbf3f2d72 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/imageservice/v2/tasks_test.go @@ -0,0 +1,63 @@ +// +build acceptance imageservice tasks + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTasksListEachPage(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + listOpts := tasks.ListOpts{ + Limit: 1, + } + + pager := tasks.List(client, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + tasks, err := tasks.ExtractTasks(page) + th.AssertNoErr(t, err) + + for _, task := range tasks { + tools.PrintResource(t, task) + } + + return true, nil + }) +} + +func TestTasksListAllPages(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + listOpts := tasks.ListOpts{} + + allPages, err := tasks.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allTasks, err := tasks.ExtractTasks(allPages) + th.AssertNoErr(t, err) + + for _, i := range allTasks { + tools.PrintResource(t, i) + } +} + +func TestTaskCreate(t *testing.T) { + client, err := clients.NewImageServiceV2Client() + th.AssertNoErr(t, err) + + task, err := CreateTask(t, client, ImportImageURL) + if err != nil { + t.Fatalf("Unable to create an Imageservice task: %v", err) + } + + tools.PrintResource(t, task) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/acls_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/acls_test.go new file mode 100644 index 000000000000..54a123a2b231 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/acls_test.go @@ -0,0 +1,103 @@ +// +build acceptance keymanager acls + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestACLCRUD(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + payload := tools.RandomString("SUPERSECRET-", 8) + secret, err := CreateSecretWithPayload(t, client, payload) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + user := tools.RandomString("", 32) + users := []string{user} + iFalse := false + setOpts := acls.SetOpts{ + Type: "read", + Users: &users, + ProjectAccess: &iFalse, + } + + aclRef, err := acls.SetSecretACL(client, secretID, setOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, aclRef) + defer func() { + err := acls.DeleteSecretACL(client, secretID).ExtractErr() + th.AssertNoErr(t, 
err) + acl, err := acls.GetSecretACL(client, secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, acl) + }() + + acl, err := acls.GetSecretACL(client, secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, acl) + tools.PrintResource(t, (*acl)["read"].Created) + th.AssertEquals(t, len((*acl)["read"].Users), 1) + th.AssertEquals(t, (*acl)["read"].ProjectAccess, false) + + newUsers := []string{} + updateOpts := acls.SetOpts{ + Type: "read", + Users: &newUsers, + } + + aclRef, err = acls.UpdateSecretACL(client, secretID, updateOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, aclRef) + + acl, err = acls.GetSecretACL(client, secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, acl) + tools.PrintResource(t, (*acl)["read"].Created) + th.AssertEquals(t, len((*acl)["read"].Users), 0) + th.AssertEquals(t, (*acl)["read"].ProjectAccess, false) + + container, err := CreateGenericContainer(t, client, secret) + th.AssertNoErr(t, err) + containerID, err := ParseID(container.ContainerRef) + th.AssertNoErr(t, err) + defer DeleteContainer(t, client, containerID) + + aclRef, err = acls.SetContainerACL(client, containerID, setOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, aclRef) + defer func() { + err := acls.DeleteContainerACL(client, containerID).ExtractErr() + th.AssertNoErr(t, err) + acl, err := acls.GetContainerACL(client, containerID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, acl) + }() + + acl, err = acls.GetContainerACL(client, containerID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, acl) + tools.PrintResource(t, (*acl)["read"].Created) + th.AssertEquals(t, len((*acl)["read"].Users), 1) + th.AssertEquals(t, (*acl)["read"].ProjectAccess, false) + + aclRef, err = acls.UpdateContainerACL(client, containerID, updateOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, aclRef) + + acl, err = acls.GetContainerACL(client, containerID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, acl) + tools.PrintResource(t, (*acl)["read"].Created) + th.AssertEquals(t, len((*acl)["read"].Users), 0) + th.AssertEquals(t, (*acl)["read"].ProjectAccess, false) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/containers_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/containers_test.go new file mode 100644 index 000000000000..2b5f4d01fe5d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/containers_test.go @@ -0,0 +1,188 @@ +// +build acceptance keymanager containers + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestGenericContainersCRUD(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + payload := tools.RandomString("SUPERSECRET-", 8) + secret, err := CreateSecretWithPayload(t, client, payload) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + container, err := CreateGenericContainer(t, client, secret) + th.AssertNoErr(t, err) + containerID, err := ParseID(container.ContainerRef) + th.AssertNoErr(t, err) + defer 
DeleteContainer(t, client, containerID) + + allPages, err := containers.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allContainers, err := containers.ExtractContainers(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allContainers { + if v.ContainerRef == container.ContainerRef { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestCertificateContainer(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + pass := tools.RandomString("", 16) + priv, cert, err := CreateCertificate(t, pass) + th.AssertNoErr(t, err) + + private, err := CreatePrivateSecret(t, client, priv) + th.AssertNoErr(t, err) + secretID, err := ParseID(private.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Private Payload: %s", string(payload)) + + certificate, err := CreateCertificateSecret(t, client, cert) + th.AssertNoErr(t, err) + secretID, err = ParseID(certificate.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err = secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Certificate Payload: %s", string(payload)) + + passphrase, err := CreatePassphraseSecret(t, client, pass) + th.AssertNoErr(t, err) + secretID, err = ParseID(passphrase.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err = secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Passphrase Payload: %s", string(payload)) + + container, err := CreateCertificateContainer(t, client, passphrase, private, certificate) + th.AssertNoErr(t, err) + containerID, err := ParseID(container.ContainerRef) + defer DeleteContainer(t, client, containerID) +} + +func TestRSAContainer(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + pass := tools.RandomString("", 16) + priv, pub, err := CreateRSAKeyPair(t, pass) + th.AssertNoErr(t, err) + + private, err := CreatePrivateSecret(t, client, priv) + th.AssertNoErr(t, err) + secretID, err := ParseID(private.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Private Payload: %s", string(payload)) + + public, err := CreatePublicSecret(t, client, pub) + th.AssertNoErr(t, err) + secretID, err = ParseID(public.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err = secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Public Payload: %s", string(payload)) + + passphrase, err := CreatePassphraseSecret(t, client, pass) + th.AssertNoErr(t, err) + secretID, err = ParseID(passphrase.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err = secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Passphrase Payload: %s", string(payload)) + + container, err := CreateRSAContainer(t, client, passphrase, private, public) + th.AssertNoErr(t, err) + containerID, err := ParseID(container.ContainerRef) + defer DeleteContainer(t, client, containerID) +} + +func TestContainerConsumersCRUD(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + payload := tools.RandomString("SUPERSECRET-", 8) + secret, err := 
CreateSecretWithPayload(t, client, payload)
+	th.AssertNoErr(t, err)
+	secretID, err := ParseID(secret.SecretRef)
+	th.AssertNoErr(t, err)
+	defer DeleteSecret(t, client, secretID)
+
+	container, err := CreateGenericContainer(t, client, secret)
+	th.AssertNoErr(t, err)
+	containerID, err := ParseID(container.ContainerRef)
+	th.AssertNoErr(t, err)
+	defer DeleteContainer(t, client, containerID)
+
+	consumerName := tools.RandomString("CONSUMER-", 8)
+	consumerCreateOpts := containers.CreateConsumerOpts{
+		Name: consumerName,
+		URL: "http://example.com",
+	}
+
+	container, err = containers.CreateConsumer(client, containerID, consumerCreateOpts).Extract()
+	th.AssertNoErr(t, err)
+	tools.PrintResource(t, container.Consumers)
+	th.AssertEquals(t, len(container.Consumers), 1)
+	defer func() {
+		deleteOpts := containers.DeleteConsumerOpts{
+			Name: consumerName,
+			URL: "http://example.com",
+		}
+
+		container, err := containers.DeleteConsumer(client, containerID, deleteOpts).Extract()
+		th.AssertNoErr(t, err)
+		th.AssertEquals(t, len(container.Consumers), 0)
+	}()
+
+	allPages, err := containers.ListConsumers(client, containerID, nil).AllPages()
+	th.AssertNoErr(t, err)
+
+	allConsumers, err := containers.ExtractConsumers(allPages)
+	th.AssertNoErr(t, err)
+
+	var found bool
+	for _, v := range allConsumers {
+		if v.Name == consumerName {
+			found = true
+		}
+	}
+
+	th.AssertEquals(t, found, true)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/keymanager.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/keymanager.go
new file mode 100644
index 000000000000..99a85f96879f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/keymanager.go
@@ -0,0 +1,713 @@
+package v1
+
+import (
+	"crypto/rand"
+	"crypto/rsa"
+	"crypto/x509"
+	"crypto/x509/pkix"
+	"encoding/asn1"
+	"encoding/base64"
+	"encoding/pem"
+	"fmt"
+	"math/big"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers"
+	"github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders"
+	"github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets"
+	th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+// CreateAsymmetricOrder will create a random asymmetric order.
+// An error will be returned if the order could not be created.
+func CreateAsymmetricOrder(t *testing.T, client *gophercloud.ServiceClient) (*orders.Order, error) { + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create order %s", name) + + expiration := time.Date(2049, 1, 1, 1, 1, 1, 0, time.UTC) + createOpts := orders.CreateOpts{ + Type: orders.AsymmetricOrder, + Meta: orders.MetaOpts{ + Name: name, + Algorithm: "rsa", + BitLength: 2048, + Mode: "cbc", + Expiration: &expiration, + }, + } + + order, err := orders.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + orderID, err := ParseID(order.OrderRef) + if err != nil { + return nil, err + } + + err = WaitForOrder(client, orderID) + th.AssertNoErr(t, err) + + order, err = orders.Get(client, orderID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, order) + tools.PrintResource(t, order.Meta.Expiration) + + th.AssertEquals(t, order.Meta.Name, name) + th.AssertEquals(t, order.Type, "asymmetric") + + return order, nil +} + +// CreateCertificateContainer will create a random certificate container. +// An error will be returned if the container could not be created. +func CreateCertificateContainer(t *testing.T, client *gophercloud.ServiceClient, passphrase, private, certificate *secrets.Secret) (*containers.Container, error) { + containerName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create container %s", containerName) + + createOpts := containers.CreateOpts{ + Type: containers.CertificateContainer, + Name: containerName, + SecretRefs: []containers.SecretRef{ + { + Name: "certificate", + SecretRef: certificate.SecretRef, + }, + { + Name: "private_key", + SecretRef: private.SecretRef, + }, + { + Name: "private_key_passphrase", + SecretRef: passphrase.SecretRef, + }, + }, + } + + container, err := containers.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created container: %s", container.ContainerRef) + + containerID, err := ParseID(container.ContainerRef) + if err != nil { + return nil, err + } + + container, err = containers.Get(client, containerID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, container) + + th.AssertEquals(t, container.Name, containerName) + th.AssertEquals(t, container.Type, "certificate") + + return container, nil +} + +// CreateKeyOrder will create a random key order. +// An error will be returned if the order could not be created. +func CreateKeyOrder(t *testing.T, client *gophercloud.ServiceClient) (*orders.Order, error) { + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create order %s", name) + + expiration := time.Date(2049, 1, 1, 1, 1, 1, 0, time.UTC) + createOpts := orders.CreateOpts{ + Type: orders.KeyOrder, + Meta: orders.MetaOpts{ + Name: name, + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + Expiration: &expiration, + }, + } + + order, err := orders.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + orderID, err := ParseID(order.OrderRef) + if err != nil { + return nil, err + } + + order, err = orders.Get(client, orderID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, order) + tools.PrintResource(t, order.Meta.Expiration) + + th.AssertEquals(t, order.Meta.Name, name) + th.AssertEquals(t, order.Type, "key") + + return order, nil +} + +// CreateRSAContainer will create a random RSA container. +// An error will be returned if the container could not be created. 
+func CreateRSAContainer(t *testing.T, client *gophercloud.ServiceClient, passphrase, private, public *secrets.Secret) (*containers.Container, error) { + containerName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create container %s", containerName) + + createOpts := containers.CreateOpts{ + Type: containers.RSAContainer, + Name: containerName, + SecretRefs: []containers.SecretRef{ + { + Name: "public_key", + SecretRef: public.SecretRef, + }, + { + Name: "private_key", + SecretRef: private.SecretRef, + }, + { + Name: "private_key_passphrase", + SecretRef: passphrase.SecretRef, + }, + }, + } + + container, err := containers.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created container: %s", container.ContainerRef) + + containerID, err := ParseID(container.ContainerRef) + if err != nil { + return nil, err + } + + container, err = containers.Get(client, containerID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, container) + + th.AssertEquals(t, container.Name, containerName) + th.AssertEquals(t, container.Type, "rsa") + + return container, nil +} + +// CreateCertificateSecret will create a random certificate secret. An error +// will be returned if the secret could not be created. +func CreateCertificateSecret(t *testing.T, client *gophercloud.ServiceClient, cert []byte) (*secrets.Secret, error) { + b64Cert := base64.StdEncoding.EncodeToString(cert) + + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create public key %s", name) + + createOpts := secrets.CreateOpts{ + Name: name, + SecretType: secrets.CertificateSecret, + Payload: b64Cert, + PayloadContentType: "application/octet-stream", + PayloadContentEncoding: "base64", + Algorithm: "rsa", + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, name) + th.AssertEquals(t, secret.Algorithm, "rsa") + + return secret, nil +} + +// CreateEmptySecret will create a random secret with no payload. An error will +// be returned if the secret could not be created. +func CreateEmptySecret(t *testing.T, client *gophercloud.ServiceClient) (*secrets.Secret, error) { + secretName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create secret %s", secretName) + + createOpts := secrets.CreateOpts{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + Name: secretName, + SecretType: secrets.OpaqueSecret, + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, secretName) + th.AssertEquals(t, secret.Algorithm, "aes") + + return secret, nil +} + +// CreateGenericContainer will create a random generic container with a +// specified secret. An error will be returned if the container could not +// be created. 
+func CreateGenericContainer(t *testing.T, client *gophercloud.ServiceClient, secret *secrets.Secret) (*containers.Container, error) { + containerName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create container %s", containerName) + + createOpts := containers.CreateOpts{ + Type: containers.GenericContainer, + Name: containerName, + SecretRefs: []containers.SecretRef{ + { + Name: secret.Name, + SecretRef: secret.SecretRef, + }, + }, + } + + container, err := containers.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created container: %s", container.ContainerRef) + + containerID, err := ParseID(container.ContainerRef) + if err != nil { + return nil, err + } + + container, err = containers.Get(client, containerID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, container) + + th.AssertEquals(t, container.Name, containerName) + th.AssertEquals(t, container.Type, "generic") + + return container, nil +} + +// CreatePassphraseSecret will create a random passphrase secret. +// An error will be returned if the secret could not be created. +func CreatePassphraseSecret(t *testing.T, client *gophercloud.ServiceClient, passphrase string) (*secrets.Secret, error) { + secretName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create secret %s", secretName) + + createOpts := secrets.CreateOpts{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + Name: secretName, + Payload: passphrase, + PayloadContentType: "text/plain", + SecretType: secrets.PassphraseSecret, + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, secretName) + th.AssertEquals(t, secret.Algorithm, "aes") + + return secret, nil +} + +// CreatePublicSecret will create a random public secret. An error +// will be returned if the secret could not be created. +func CreatePublicSecret(t *testing.T, client *gophercloud.ServiceClient, pub []byte) (*secrets.Secret, error) { + b64Cert := base64.StdEncoding.EncodeToString(pub) + + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create public key %s", name) + + createOpts := secrets.CreateOpts{ + Name: name, + SecretType: secrets.PublicSecret, + Payload: b64Cert, + PayloadContentType: "application/octet-stream", + PayloadContentEncoding: "base64", + Algorithm: "rsa", + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, name) + th.AssertEquals(t, secret.Algorithm, "rsa") + + return secret, nil +} + +// CreatePrivateSecret will create a random private secret. An error +// will be returned if the secret could not be created. 
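+//
+// For example, using the key-pair helper below (the empty passphrase is illustrative):
+//
+//	priv, _, err := CreateRSAKeyPair(t, "")
+//	th.AssertNoErr(t, err)
+//	secret, err := CreatePrivateSecret(t, client, priv)
+//	th.AssertNoErr(t, err)
+//	secretID, err := ParseID(secret.SecretRef)
+//	th.AssertNoErr(t, err)
+//	defer DeleteSecret(t, client, secretID)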
+func CreatePrivateSecret(t *testing.T, client *gophercloud.ServiceClient, priv []byte) (*secrets.Secret, error) { + b64Cert := base64.StdEncoding.EncodeToString(priv) + + name := tools.RandomString("TESTACC-", 8) + t.Logf("Attempting to create public key %s", name) + + createOpts := secrets.CreateOpts{ + Name: name, + SecretType: secrets.PrivateSecret, + Payload: b64Cert, + PayloadContentType: "application/octet-stream", + PayloadContentEncoding: "base64", + Algorithm: "rsa", + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, name) + th.AssertEquals(t, secret.Algorithm, "rsa") + + return secret, nil +} + +// CreateSecretWithPayload will create a random secret with a given payload. +// An error will be returned if the secret could not be created. +func CreateSecretWithPayload(t *testing.T, client *gophercloud.ServiceClient, payload string) (*secrets.Secret, error) { + secretName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create secret %s", secretName) + + createOpts := secrets.CreateOpts{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + Name: secretName, + Payload: payload, + PayloadContentType: "text/plain", + SecretType: secrets.OpaqueSecret, + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, secretName) + th.AssertEquals(t, secret.Algorithm, "aes") + + return secret, nil +} + +// CreateSymmetricSecret will create a random symmetric secret. An error +// will be returned if the secret could not be created. +func CreateSymmetricSecret(t *testing.T, client *gophercloud.ServiceClient) (*secrets.Secret, error) { + name := tools.RandomString("TESTACC-", 8) + key := tools.RandomString("", 256) + b64Key := base64.StdEncoding.EncodeToString([]byte(key)) + + t.Logf("Attempting to create symmetric key %s", name) + + createOpts := secrets.CreateOpts{ + Name: name, + SecretType: secrets.SymmetricSecret, + Payload: b64Key, + PayloadContentType: "application/octet-stream", + PayloadContentEncoding: "base64", + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + } + + secret, err := secrets.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created secret: %s", secret.SecretRef) + + secretID, err := ParseID(secret.SecretRef) + if err != nil { + return nil, err + } + + secret, err = secrets.Get(client, secretID).Extract() + if err != nil { + return nil, err + } + + tools.PrintResource(t, secret) + + th.AssertEquals(t, secret.Name, name) + th.AssertEquals(t, secret.Algorithm, "aes") + + return secret, nil +} + +// DeleteContainer will delete a container. A fatal error will occur if the +// container could not be deleted. This works best when used as a deferred +// function. 
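+//
+// A sketch of the deferred cleanup pattern used throughout these tests:
+//
+//	containerID, err := ParseID(container.ContainerRef)
+//	th.AssertNoErr(t, err)
+//	defer DeleteContainer(t, client, containerID)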
+func DeleteContainer(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete container %s", id) + + err := containers.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Could not delete container: %s", err) + } + + t.Logf("Successfully deleted container %s", id) +} + +// DeleteOrder will delete an order. A fatal error will occur if the +// order could not be deleted. This works best when used as a deferred +// function. +func DeleteOrder(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete order %s", id) + + err := orders.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Could not delete order: %s", err) + } + + t.Logf("Successfully deleted order %s", id) +} + +// DeleteSecret will delete a secret. A fatal error will occur if the secret +// could not be deleted. This works best when used as a deferred function. +func DeleteSecret(t *testing.T, client *gophercloud.ServiceClient, id string) { + t.Logf("Attempting to delete secret %s", id) + + err := secrets.Delete(client, id).ExtractErr() + if err != nil { + t.Fatalf("Could not delete secret: %s", err) + } + + t.Logf("Successfully deleted secret %s", id) +} + +func ParseID(ref string) (string, error) { + parts := strings.Split(ref, "/") + if len(parts) < 2 { + return "", fmt.Errorf("Could not parse %s", ref) + } + + return parts[len(parts)-1], nil +} + +// CreateCertificate will create a random certificate. A fatal error will +// be returned if creation failed. +// https://golang.org/src/crypto/tls/generate_cert.go +func CreateCertificate(t *testing.T, passphrase string) ([]byte, []byte, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + block := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + + if passphrase != "" { + block, err = x509.EncryptPEMBlock(rand.Reader, block.Type, block.Bytes, []byte(passphrase), x509.PEMCipherAES256) + if err != nil { + return nil, nil, err + } + } + + keyPem := pem.EncodeToMemory(block) + + serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberLimit) + if err != nil { + return nil, nil, err + } + + tpl := x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + Organization: []string{"Some Org"}, + }, + NotBefore: time.Now(), + NotAfter: time.Now().AddDate(5, 0, 0), + BasicConstraintsValid: true, + } + + cert, err := x509.CreateCertificate(rand.Reader, &tpl, &tpl, &key.PublicKey, key) + if err != nil { + return nil, nil, err + } + + certPem := pem.EncodeToMemory(&pem.Block{ + Type: "CERTIFICATE", + Bytes: cert, + }) + + return keyPem, certPem, nil +} + +// CreateRSAKeyPair will create a random RSA key pair. An error will be +// returned if the pair could not be created. 
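+//
+// For illustration, the first return value is the private key and the second the
+// public key, both PEM encoded (the empty passphrase is illustrative):
+//
+//	keyPem, pubPem, err := CreateRSAKeyPair(t, "")
+//	th.AssertNoErr(t, err)
+//	block, _ := pem.Decode(pubPem)
+//	t.Logf("Generated a %q block of %d bytes", block.Type, len(block.Bytes))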
+func CreateRSAKeyPair(t *testing.T, passphrase string) ([]byte, []byte, error) { + key, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return nil, nil, err + } + + block := &pem.Block{ + Type: "RSA PRIVATE KEY", + Bytes: x509.MarshalPKCS1PrivateKey(key), + } + + if passphrase != "" { + block, err = x509.EncryptPEMBlock(rand.Reader, block.Type, block.Bytes, []byte(passphrase), x509.PEMCipherAES256) + if err != nil { + return nil, nil, err + } + } + + keyPem := pem.EncodeToMemory(block) + + asn1Bytes, err := asn1.Marshal(key.PublicKey) + if err != nil { + return nil, nil, err + } + + block = &pem.Block{ + Type: "RSA PUBLIC KEY", + Bytes: asn1Bytes, + } + + pubPem := pem.EncodeToMemory(block) + + return keyPem, pubPem, nil +} + +func WaitForOrder(client *gophercloud.ServiceClient, orderID string) error { + return tools.WaitFor(func() (bool, error) { + order, err := orders.Get(client, orderID).Extract() + if err != nil { + return false, err + } + + if order.SecretRef != "" { + return true, nil + } + + if order.ContainerRef != "" { + return true, nil + } + + if order.Status == "ERROR" { + return false, fmt.Errorf("Order %s in ERROR state", orderID) + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/orders_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/orders_test.go new file mode 100644 index 000000000000..de44debe9123 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/orders_test.go @@ -0,0 +1,84 @@ +// +build acceptance keymanager orders + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestOrdersCRUD(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + order, err := CreateKeyOrder(t, client) + th.AssertNoErr(t, err) + orderID, err := ParseID(order.OrderRef) + th.AssertNoErr(t, err) + defer DeleteOrder(t, client, orderID) + + secretID, err := ParseID(order.SecretRef) + th.AssertNoErr(t, err) + + payloadOpts := secrets.GetPayloadOpts{ + PayloadContentType: "application/octet-stream", + } + payload, err := secrets.GetPayload(client, secretID, payloadOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, payload) + + allPages, err := orders.List(client, nil).AllPages() + th.AssertNoErr(t, err) + + allOrders, err := orders.ExtractOrders(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allOrders { + if v.OrderRef == order.OrderRef { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestOrdersAsymmetric(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + order, err := CreateAsymmetricOrder(t, client) + th.AssertNoErr(t, err) + orderID, err := ParseID(order.OrderRef) + th.AssertNoErr(t, err) + defer DeleteOrder(t, client, orderID) + + containerID, err := ParseID(order.ContainerRef) + th.AssertNoErr(t, err) + + container, err := containers.Get(client, containerID).Extract() + th.AssertNoErr(t, err) + + for _, v := range container.SecretRefs { + secretID, err := 
ParseID(v.SecretRef) + th.AssertNoErr(t, err) + + payloadOpts := secrets.GetPayloadOpts{ + PayloadContentType: "application/octet-stream", + } + + payload, err := secrets.GetPayload(client, secretID, payloadOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, string(payload)) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/secrets_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/secrets_test.go new file mode 100644 index 000000000000..dd00e569adb1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/keymanager/v1/secrets_test.go @@ -0,0 +1,242 @@ +// +build acceptance keymanager secrets + +package v1 + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestSecretsCRUD(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + payload := tools.RandomString("SUPERSECRET-", 8) + secret, err := CreateSecretWithPayload(t, client, payload) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + // Test payload retrieval + actual, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, payload, string(actual)) + + // Test listing secrets + createdQuery := &secrets.DateQuery{ + Date: time.Date(2049, 6, 7, 1, 2, 3, 0, time.UTC), + Filter: secrets.DateFilterLT, + } + + listOpts := secrets.ListOpts{ + CreatedQuery: createdQuery, + } + + allPages, err := secrets.List(client, listOpts).AllPages() + th.AssertNoErr(t, err) + + allSecrets, err := secrets.ExtractSecrets(allPages) + th.AssertNoErr(t, err) + + var found bool + for _, v := range allSecrets { + if v.SecretRef == secret.SecretRef { + found = true + } + } + + th.AssertEquals(t, found, true) +} + +func TestSecretsDelayedPayload(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + secret, err := CreateEmptySecret(t, client) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload := tools.RandomString("SUPERSECRET-", 8) + updateOpts := secrets.UpdateOpts{ + ContentType: "text/plain", + Payload: payload, + } + + err = secrets.Update(client, secretID, updateOpts).ExtractErr() + th.AssertNoErr(t, err) + + // Test payload retrieval + actual, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, payload, string(actual)) +} + +func TestSecretsMetadataCRUD(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + payload := tools.RandomString("SUPERSECRET-", 8) + secret, err := CreateSecretWithPayload(t, client, payload) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + // Create some metadata + createOpts := secrets.MetadataOpts{ + "foo": "bar", + "something": "something else", + } + + ref, err := secrets.CreateMetadata(client, secretID, createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ref["metadata_ref"], secret.SecretRef+"/metadata") + + // Get the metadata + metadata, err := secrets.GetMetadata(client, 
secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, metadata) + th.AssertEquals(t, metadata["foo"], "bar") + th.AssertEquals(t, metadata["something"], "something else") + + // Add a single metadatum + metadatumOpts := secrets.MetadatumOpts{ + Key: "bar", + Value: "baz", + } + + err = secrets.CreateMetadatum(client, secretID, metadatumOpts).ExtractErr() + th.AssertNoErr(t, err) + + metadata, err = secrets.GetMetadata(client, secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, metadata) + th.AssertEquals(t, len(metadata), 3) + th.AssertEquals(t, metadata["foo"], "bar") + th.AssertEquals(t, metadata["something"], "something else") + th.AssertEquals(t, metadata["bar"], "baz") + + // Update a metadatum + metadatumOpts.Key = "foo" + metadatumOpts.Value = "foo" + + metadatum, err := secrets.UpdateMetadatum(client, secretID, metadatumOpts).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, metadatum) + th.AssertDeepEquals(t, metadatum.Key, "foo") + th.AssertDeepEquals(t, metadatum.Value, "foo") + + metadata, err = secrets.GetMetadata(client, secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, metadata) + th.AssertEquals(t, len(metadata), 3) + th.AssertEquals(t, metadata["foo"], "foo") + th.AssertEquals(t, metadata["something"], "something else") + th.AssertEquals(t, metadata["bar"], "baz") + + // Delete a metadatum + err = secrets.DeleteMetadatum(client, secretID, "foo").ExtractErr() + th.AssertNoErr(t, err) + + metadata, err = secrets.GetMetadata(client, secretID).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, metadata) + th.AssertEquals(t, len(metadata), 2) + th.AssertEquals(t, metadata["something"], "something else") + th.AssertEquals(t, metadata["bar"], "baz") +} + +func TestSymmetricSecret(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + secret, err := CreateSymmetricSecret(t, client) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, string(payload)) +} + +func TestCertificateSecret(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + pass := tools.RandomString("", 16) + cert, _, err := CreateCertificate(t, pass) + th.AssertNoErr(t, err) + + secret, err := CreateCertificateSecret(t, client, cert) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, string(payload)) +} + +func TestPrivateSecret(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + pass := tools.RandomString("", 16) + priv, _, err := CreateCertificate(t, pass) + th.AssertNoErr(t, err) + + secret, err := CreatePrivateSecret(t, client, priv) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, string(payload)) +} + +func TestPublicSecret(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + _, pub, err := CreateRSAKeyPair(t, "") + th.AssertNoErr(t, err) + + secret, err := 
CreatePublicSecret(t, client, pub) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, string(payload)) +} + +func TestPassphraseSecret(t *testing.T) { + client, err := clients.NewKeyManagerV1Client() + th.AssertNoErr(t, err) + + pass := tools.RandomString("", 16) + secret, err := CreatePassphraseSecret(t, client, pass) + th.AssertNoErr(t, err) + secretID, err := ParseID(secret.SecretRef) + th.AssertNoErr(t, err) + defer DeleteSecret(t, client, secretID) + + payload, err := secrets.GetPayload(client, secretID, nil).Extract() + th.AssertNoErr(t, err) + tools.PrintResource(t, string(payload)) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/amphorae_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/amphorae_test.go new file mode 100644 index 000000000000..0e3369c49aa3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/amphorae_test.go @@ -0,0 +1,32 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae" +) + +func TestAmphoraeList(t *testing.T) { + clients.RequireAdmin(t) + + client, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + allPages, err := amphorae.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list amphorae: %v", err) + } + + allAmphorae, err := amphorae.ExtractAmphorae(allPages) + if err != nil { + t.Fatalf("Unable to extract amphorae: %v", err) + } + + for _, amphora := range allAmphorae { + tools.PrintResource(t, amphora) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/l7policies_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/l7policies_test.go new file mode 100644 index 000000000000..d6f092587053 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/l7policies_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking loadbalancer l7policies + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies" +) + +func TestL7PoliciesList(t *testing.T) { + client, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + allPages, err := l7policies.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list l7policies: %v", err) + } + + allL7Policies, err := l7policies.ExtractL7Policies(allPages) + if err != nil { + t.Fatalf("Unable to extract l7policies: %v", err) + } + + for _, policy := range allL7Policies { + tools.PrintResource(t, policy) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/listeners_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/listeners_test.go new file mode 100644 index 000000000000..f643676544b8 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/listeners_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking loadbalancer listeners + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" +) + +func TestListenersList(t *testing.T) { + client, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + allPages, err := listeners.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list listeners: %v", err) + } + + allListeners, err := listeners.ExtractListeners(allPages) + if err != nil { + t.Fatalf("Unable to extract listeners: %v", err) + } + + for _, listener := range allListeners { + tools.PrintResource(t, listener) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/loadbalancer.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/loadbalancer.go new file mode 100644 index 000000000000..c15dd068e44f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/loadbalancer.go @@ -0,0 +1,397 @@ +package v2 + +import ( + "fmt" + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" +) + +const loadbalancerActiveTimeoutSeconds = 300 +const loadbalancerDeleteTimeoutSeconds = 300 + +// CreateListener will create a listener for a given load balancer on a random +// port with a random name. An error will be returned if the listener could not +// be created. +func CreateListener(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer) (*listeners.Listener, error) { + listenerName := tools.RandomString("TESTACCT-", 8) + listenerPort := tools.RandomInt(1, 100) + + t.Logf("Attempting to create listener %s on port %d", listenerName, listenerPort) + + createOpts := listeners.CreateOpts{ + Name: listenerName, + LoadbalancerID: lb.ID, + Protocol: "TCP", + ProtocolPort: listenerPort, + } + + listener, err := listeners.Create(client, createOpts).Extract() + if err != nil { + return listener, err + } + + t.Logf("Successfully created listener %s", listenerName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return listener, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return listener, nil +} + +// CreateLoadBalancer will create a load balancer with a random name on a given +// subnet. An error will be returned if the loadbalancer could not be created. 
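+//
+// A minimal sketch, assuming a loadbalancer client from clients.NewLoadBalancerV2Client
+// and a subnet created with the acceptance networking helpers, as in the tests in this
+// package:
+//
+//	lb, err := CreateLoadBalancer(t, lbClient, subnet.ID)
+//	if err != nil {
+//		t.Fatalf("Unable to create loadbalancer: %v", err)
+//	}
+//	defer DeleteLoadBalancer(t, lbClient, lb.ID)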
+func CreateLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, subnetID string) (*loadbalancers.LoadBalancer, error) { + lbName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create loadbalancer %s on subnet %s", lbName, subnetID) + + createOpts := loadbalancers.CreateOpts{ + Name: lbName, + VipSubnetID: subnetID, + AdminStateUp: gophercloud.Enabled, + } + + lb, err := loadbalancers.Create(client, createOpts).Extract() + if err != nil { + return lb, err + } + + t.Logf("Successfully created loadbalancer %s on subnet %s", lbName, subnetID) + t.Logf("Waiting for loadbalancer %s to become active", lbName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return lb, err + } + + t.Logf("LoadBalancer %s is active", lbName) + + return lb, nil +} + +// CreateMember will create a member with a random name, port, address, and +// weight. An error will be returned if the member could not be created. +func CreateMember(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer, pool *pools.Pool, subnetID, subnetCIDR string) (*pools.Member, error) { + memberName := tools.RandomString("TESTACCT-", 8) + memberPort := tools.RandomInt(100, 1000) + memberWeight := tools.RandomInt(1, 10) + + cidrParts := strings.Split(subnetCIDR, "/") + subnetParts := strings.Split(cidrParts[0], ".") + memberAddress := fmt.Sprintf("%s.%s.%s.%d", subnetParts[0], subnetParts[1], subnetParts[2], tools.RandomInt(10, 100)) + + t.Logf("Attempting to create member %s", memberName) + + createOpts := pools.CreateMemberOpts{ + Name: memberName, + ProtocolPort: memberPort, + Weight: memberWeight, + Address: memberAddress, + SubnetID: subnetID, + } + + t.Logf("Member create opts: %#v", createOpts) + + member, err := pools.CreateMember(client, pool.ID, createOpts).Extract() + if err != nil { + return member, err + } + + t.Logf("Successfully created member %s", memberName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return member, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return member, nil +} + +// CreateMonitor will create a monitor with a random name for a specific pool. +// An error will be returned if the monitor could not be created. +func CreateMonitor(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer, pool *pools.Pool) (*monitors.Monitor, error) { + monitorName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create monitor %s", monitorName) + + createOpts := monitors.CreateOpts{ + PoolID: pool.ID, + Name: monitorName, + Delay: 10, + Timeout: 5, + MaxRetries: 5, + Type: "PING", + } + + monitor, err := monitors.Create(client, createOpts).Extract() + if err != nil { + return monitor, err + } + + t.Logf("Successfully created monitor: %s", monitorName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return monitor, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return monitor, nil +} + +// CreatePool will create a pool with a random name with a specified listener +// and loadbalancer. An error will be returned if the pool could not be +// created. 
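+//
+// For example, once the loadbalancer is ACTIVE:
+//
+//	pool, err := CreatePool(t, lbClient, lb)
+//	if err != nil {
+//		t.Fatalf("Unable to create pool: %v", err)
+//	}
+//	defer DeletePool(t, lbClient, lb.ID, pool.ID)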
+func CreatePool(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer) (*pools.Pool, error) { + poolName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create pool %s", poolName) + + createOpts := pools.CreateOpts{ + Name: poolName, + Protocol: pools.ProtocolTCP, + LoadbalancerID: lb.ID, + LBMethod: pools.LBMethodLeastConnections, + } + + pool, err := pools.Create(client, createOpts).Extract() + if err != nil { + return pool, err + } + + t.Logf("Successfully created pool %s", poolName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return pool, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return pool, nil +} + +// CreateL7Policy will create a l7 policy with a random name with a specified listener +// and loadbalancer. An error will be returned if the l7 policy could not be +// created. +func CreateL7Policy(t *testing.T, client *gophercloud.ServiceClient, listener *listeners.Listener, lb *loadbalancers.LoadBalancer) (*l7policies.L7Policy, error) { + policyName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create l7 policy %s", policyName) + + createOpts := l7policies.CreateOpts{ + Name: policyName, + ListenerID: listener.ID, + Action: l7policies.ActionRedirectToURL, + RedirectURL: "http://www.example.com", + } + + policy, err := l7policies.Create(client, createOpts).Extract() + if err != nil { + return policy, err + } + + t.Logf("Successfully created l7 policy %s", policyName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return policy, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return policy, nil +} + +// CreateL7Rule creates a l7 rule for specified l7 policy. +func CreateL7Rule(t *testing.T, client *gophercloud.ServiceClient, policyID string, lb *loadbalancers.LoadBalancer) (*l7policies.Rule, error) { + t.Logf("Attempting to create l7 rule for policy %s", policyID) + + createOpts := l7policies.CreateRuleOpts{ + RuleType: l7policies.TypePath, + CompareType: l7policies.CompareTypeStartWith, + Value: "/api", + } + + rule, err := l7policies.CreateRule(client, policyID, createOpts).Extract() + if err != nil { + return rule, err + } + + t.Logf("Successfully created l7 rule for policy %s", policyID) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return rule, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return rule, nil +} + +// DeleteL7Policy will delete a specified l7 policy. A fatal error will occur if +// the l7 policy could not be deleted. This works best when used as a deferred +// function. +func DeleteL7Policy(t *testing.T, client *gophercloud.ServiceClient, lbID, policyID string) { + t.Logf("Attempting to delete l7 policy %s", policyID) + + if err := l7policies.Delete(client, policyID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete l7 policy: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted l7 policy %s", policyID) +} + +// DeleteL7Rule will delete a specified l7 rule. A fatal error will occur if +// the l7 rule could not be deleted. This works best when used as a deferred +// function. 
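+//
+// Typically deferred immediately after CreateL7Rule, for example:
+//
+//	rule, err := CreateL7Rule(t, lbClient, policy.ID, lb)
+//	if err != nil {
+//		t.Fatalf("Unable to create l7 rule: %v", err)
+//	}
+//	defer DeleteL7Rule(t, lbClient, lb.ID, policy.ID, rule.ID)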
+func DeleteL7Rule(t *testing.T, client *gophercloud.ServiceClient, lbID, policyID, ruleID string) { + t.Logf("Attempting to delete l7 rule %s", ruleID) + + if err := l7policies.DeleteRule(client, policyID, ruleID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete l7 rule: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted l7 rule %s", ruleID) +} + +// DeleteListener will delete a specified listener. A fatal error will occur if +// the listener could not be deleted. This works best when used as a deferred +// function. +func DeleteListener(t *testing.T, client *gophercloud.ServiceClient, lbID, listenerID string) { + t.Logf("Attempting to delete listener %s", listenerID) + + if err := listeners.Delete(client, listenerID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete listener: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted listener %s", listenerID) +} + +// DeleteMember will delete a specified member. A fatal error will occur if the +// member could not be deleted. This works best when used as a deferred +// function. +func DeleteMember(t *testing.T, client *gophercloud.ServiceClient, lbID, poolID, memberID string) { + t.Logf("Attempting to delete member %s", memberID) + + if err := pools.DeleteMember(client, poolID, memberID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete member: %s", memberID) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted member %s", memberID) +} + +// DeleteLoadBalancer will delete a specified loadbalancer. A fatal error will +// occur if the loadbalancer could not be deleted. This works best when used +// as a deferred function. +func DeleteLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, lbID string) { + t.Logf("Attempting to delete loadbalancer %s", lbID) + + deleteOpts := loadbalancers.DeleteOpts{ + Cascade: false, + } + + if err := loadbalancers.Delete(client, lbID, deleteOpts).ExtractErr(); err != nil { + t.Fatalf("Unable to delete loadbalancer: %v", err) + } + + t.Logf("Waiting for loadbalancer %s to delete", lbID) + + if err := WaitForLoadBalancerState(client, lbID, "DELETED", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Loadbalancer did not delete in time.") + } + + t.Logf("Successfully deleted loadbalancer %s", lbID) +} + +// CascadeDeleteLoadBalancer will perform a cascading delete on a loadbalancer. +// A fatal error will occur if the loadbalancer could not be deleted. This works +// best when used as a deferred function. 
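+//
+// As a sketch, a cascading delete removes the loadbalancer together with its child
+// resources, so a test can skip per-resource cleanup:
+//
+//	lb, err := CreateLoadBalancer(t, lbClient, subnet.ID)
+//	if err != nil {
+//		t.Fatalf("Unable to create loadbalancer: %v", err)
+//	}
+//	defer CascadeDeleteLoadBalancer(t, lbClient, lb.ID)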
+func CascadeDeleteLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, lbID string) { + t.Logf("Attempting to cascade delete loadbalancer %s", lbID) + + deleteOpts := loadbalancers.DeleteOpts{ + Cascade: true, + } + + if err := loadbalancers.Delete(client, lbID, deleteOpts).ExtractErr(); err != nil { + t.Fatalf("Unable to cascade delete loadbalancer: %v", err) + } + + t.Logf("Waiting for loadbalancer %s to cascade delete", lbID) + + if err := WaitForLoadBalancerState(client, lbID, "DELETED", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Loadbalancer did not delete in time.") + } + + t.Logf("Successfully deleted loadbalancer %s", lbID) +} + +// DeleteMonitor will delete a specified monitor. A fatal error will occur if +// the monitor could not be deleted. This works best when used as a deferred +// function. +func DeleteMonitor(t *testing.T, client *gophercloud.ServiceClient, lbID, monitorID string) { + t.Logf("Attempting to delete monitor %s", monitorID) + + if err := monitors.Delete(client, monitorID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete monitor: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted monitor %s", monitorID) +} + +// DeletePool will delete a specified pool. A fatal error will occur if the +// pool could not be deleted. This works best when used as a deferred function. +func DeletePool(t *testing.T, client *gophercloud.ServiceClient, lbID, poolID string) { + t.Logf("Attempting to delete pool %s", poolID) + + if err := pools.Delete(client, poolID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete pool: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted pool %s", poolID) +} + +// WaitForLoadBalancerState will wait until a loadbalancer reaches a given state. 
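+//
+// For example, to block until the loadbalancer returns to ACTIVE after an update:
+//
+//	if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil {
+//		t.Fatalf("Timed out waiting for loadbalancer to become active")
+//	}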
+func WaitForLoadBalancerState(client *gophercloud.ServiceClient, lbID, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := loadbalancers.Get(client, lbID).Extract() + if err != nil { + if httpStatus, ok := err.(gophercloud.ErrDefault404); ok { + if httpStatus.Actual == 404 { + if status == "DELETED" { + return true, nil + } + } + } + return false, err + } + + if current.ProvisioningStatus == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/loadbalancers_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/loadbalancers_test.go new file mode 100644 index 000000000000..1599697a6437 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/loadbalancers_test.go @@ -0,0 +1,408 @@ +// +build acceptance networking loadbalancer loadbalancers + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" +) + +func TestLoadbalancersList(t *testing.T) { + client, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + allPages, err := loadbalancers.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list loadbalancers: %v", err) + } + + allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages) + if err != nil { + t.Fatalf("Unable to extract loadbalancers: %v", err) + } + + for _, lb := range allLoadbalancers { + tools.PrintResource(t, lb) + } +} + +func TestLoadbalancersCRUD(t *testing.T) { + netClient, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a networking client: %v", err) + } + + lbClient, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + network, err := networking.CreateNetwork(t, netClient) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer networking.DeleteNetwork(t, netClient, network.ID) + + subnet, err := networking.CreateSubnet(t, netClient, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, netClient, subnet.ID) + + lb, err := CreateLoadBalancer(t, lbClient, subnet.ID) + if err != nil { + t.Fatalf("Unable to create loadbalancer: %v", err) + } + defer DeleteLoadBalancer(t, lbClient, lb.ID) + + newLB, err := loadbalancers.Get(lbClient, lb.ID).Extract() + if err != nil { + t.Fatalf("Unable to get loadbalancer: %v", err) + } + + tools.PrintResource(t, newLB) + + lbStats, err := loadbalancers.GetStats(lbClient, lb.ID).Extract() + if err != nil { + t.Fatalf("Unable to get loadbalancer's statistics: %v", err) + } + + tools.PrintResource(t, lbStats) + + // Because of the time it takes to create a loadbalancer, + // this test will include some other resources. 
+ + // Listener + listener, err := CreateListener(t, lbClient, lb) + if err != nil { + t.Fatalf("Unable to create listener: %v", err) + } + defer DeleteListener(t, lbClient, lb.ID, listener.ID) + + updateListenerOpts := listeners.UpdateOpts{ + Description: "Some listener description", + } + _, err = listeners.Update(lbClient, listener.ID, updateListenerOpts).Extract() + if err != nil { + t.Fatalf("Unable to update listener") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newListener, err := listeners.Get(lbClient, listener.ID).Extract() + if err != nil { + t.Fatalf("Unable to get listener") + } + + tools.PrintResource(t, newListener) + + listenerStats, err := listeners.GetStats(lbClient, listener.ID).Extract() + if err != nil { + t.Fatalf("Unable to get listener's statistics: %v", err) + } + + tools.PrintResource(t, listenerStats) + + // L7 policy + policy, err := CreateL7Policy(t, lbClient, listener, lb) + if err != nil { + t.Fatalf("Unable to create l7 policy: %v", err) + } + defer DeleteL7Policy(t, lbClient, lb.ID, policy.ID) + + newDescription := "New l7 policy description" + updateL7policyOpts := l7policies.UpdateOpts{ + Description: &newDescription, + } + _, err = l7policies.Update(lbClient, policy.ID, updateL7policyOpts).Extract() + if err != nil { + t.Fatalf("Unable to update l7 policy") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newPolicy, err := l7policies.Get(lbClient, policy.ID).Extract() + if err != nil { + t.Fatalf("Unable to get l7 policy: %v", err) + } + + tools.PrintResource(t, newPolicy) + + // L7 rule + rule, err := CreateL7Rule(t, lbClient, newPolicy.ID, lb) + if err != nil { + t.Fatalf("Unable to create l7 rule: %v", err) + } + defer DeleteL7Rule(t, lbClient, lb.ID, policy.ID, rule.ID) + + allPages, err := l7policies.ListRules(lbClient, policy.ID, l7policies.ListRulesOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to get l7 rules: %v", err) + } + allRules, err := l7policies.ExtractRules(allPages) + if err != nil { + t.Fatalf("Unable to extract l7 rules: %v", err) + } + for _, rule := range allRules { + tools.PrintResource(t, rule) + } + + updateL7ruleOpts := l7policies.UpdateRuleOpts{ + RuleType: l7policies.TypePath, + CompareType: l7policies.CompareTypeRegex, + Value: "/images/special*", + } + _, err = l7policies.UpdateRule(lbClient, policy.ID, rule.ID, updateL7ruleOpts).Extract() + if err != nil { + t.Fatalf("Unable to update l7 rule: %v", err) + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newRule, err := l7policies.GetRule(lbClient, newPolicy.ID, rule.ID).Extract() + if err != nil { + t.Fatalf("Unable to get l7 rule: %v", err) + } + + tools.PrintResource(t, newRule) + + // Pool + pool, err := CreatePool(t, lbClient, lb) + if err != nil { + t.Fatalf("Unable to create pool: %v", err) + } + defer DeletePool(t, lbClient, lb.ID, pool.ID) + + updatePoolOpts := pools.UpdateOpts{ + Description: "Some pool description", + } + _, err = pools.Update(lbClient, pool.ID, updatePoolOpts).Extract() + if err != nil { + t.Fatalf("Unable to update pool") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", 
loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newPool, err := pools.Get(lbClient, pool.ID).Extract() + if err != nil { + t.Fatalf("Unable to get pool") + } + + tools.PrintResource(t, newPool) + + // Member + member, err := CreateMember(t, lbClient, lb, newPool, subnet.ID, subnet.CIDR) + if err != nil { + t.Fatalf("Unable to create member: %v", err) + } + defer DeleteMember(t, lbClient, lb.ID, pool.ID, member.ID) + + newWeight := tools.RandomInt(11, 100) + updateMemberOpts := pools.UpdateMemberOpts{ + Weight: newWeight, + } + _, err = pools.UpdateMember(lbClient, pool.ID, member.ID, updateMemberOpts).Extract() + if err != nil { + t.Fatalf("Unable to update pool") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newMember, err := pools.GetMember(lbClient, pool.ID, member.ID).Extract() + if err != nil { + t.Fatalf("Unable to get member") + } + + tools.PrintResource(t, newMember) + + // Monitor + monitor, err := CreateMonitor(t, lbClient, lb, newPool) + if err != nil { + t.Fatalf("Unable to create monitor: %v", err) + } + defer DeleteMonitor(t, lbClient, lb.ID, monitor.ID) + + newDelay := tools.RandomInt(20, 30) + updateMonitorOpts := monitors.UpdateOpts{ + Delay: newDelay, + } + _, err = monitors.Update(lbClient, monitor.ID, updateMonitorOpts).Extract() + if err != nil { + t.Fatalf("Unable to update monitor") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newMonitor, err := monitors.Get(lbClient, monitor.ID).Extract() + if err != nil { + t.Fatalf("Unable to get monitor") + } + + tools.PrintResource(t, newMonitor) + +} + +func TestLoadbalancersCascadeCRUD(t *testing.T) { + netClient, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a networking client: %v", err) + } + + lbClient, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + network, err := networking.CreateNetwork(t, netClient) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer networking.DeleteNetwork(t, netClient, network.ID) + + subnet, err := networking.CreateSubnet(t, netClient, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, netClient, subnet.ID) + + lb, err := CreateLoadBalancer(t, lbClient, subnet.ID) + if err != nil { + t.Fatalf("Unable to create loadbalancer: %v", err) + } + defer CascadeDeleteLoadBalancer(t, lbClient, lb.ID) + + newLB, err := loadbalancers.Get(lbClient, lb.ID).Extract() + if err != nil { + t.Fatalf("Unable to get loadbalancer: %v", err) + } + + tools.PrintResource(t, newLB) + + // Because of the time it takes to create a loadbalancer, + // this test will include some other resources. 
+ + // Listener + listener, err := CreateListener(t, lbClient, lb) + if err != nil { + t.Fatalf("Unable to create listener: %v", err) + } + + updateListenerOpts := listeners.UpdateOpts{ + Description: "Some listener description", + } + _, err = listeners.Update(lbClient, listener.ID, updateListenerOpts).Extract() + if err != nil { + t.Fatalf("Unable to update listener") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newListener, err := listeners.Get(lbClient, listener.ID).Extract() + if err != nil { + t.Fatalf("Unable to get listener") + } + + tools.PrintResource(t, newListener) + + // Pool + pool, err := CreatePool(t, lbClient, lb) + if err != nil { + t.Fatalf("Unable to create pool: %v", err) + } + + updatePoolOpts := pools.UpdateOpts{ + Description: "Some pool description", + } + _, err = pools.Update(lbClient, pool.ID, updatePoolOpts).Extract() + if err != nil { + t.Fatalf("Unable to update pool") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newPool, err := pools.Get(lbClient, pool.ID).Extract() + if err != nil { + t.Fatalf("Unable to get pool") + } + + tools.PrintResource(t, newPool) + + // Member + member, err := CreateMember(t, lbClient, lb, newPool, subnet.ID, subnet.CIDR) + if err != nil { + t.Fatalf("Unable to create member: %v", err) + } + + newWeight := tools.RandomInt(11, 100) + updateMemberOpts := pools.UpdateMemberOpts{ + Weight: newWeight, + } + _, err = pools.UpdateMember(lbClient, pool.ID, member.ID, updateMemberOpts).Extract() + if err != nil { + t.Fatalf("Unable to update pool") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newMember, err := pools.GetMember(lbClient, pool.ID, member.ID).Extract() + if err != nil { + t.Fatalf("Unable to get member") + } + + tools.PrintResource(t, newMember) + + // Monitor + monitor, err := CreateMonitor(t, lbClient, lb, newPool) + if err != nil { + t.Fatalf("Unable to create monitor: %v", err) + } + + newDelay := tools.RandomInt(20, 30) + updateMonitorOpts := monitors.UpdateOpts{ + Delay: newDelay, + } + _, err = monitors.Update(lbClient, monitor.ID, updateMonitorOpts).Extract() + if err != nil { + t.Fatalf("Unable to update monitor") + } + + if err := WaitForLoadBalancerState(lbClient, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newMonitor, err := monitors.Get(lbClient, monitor.ID).Extract() + if err != nil { + t.Fatalf("Unable to get monitor") + } + + tools.PrintResource(t, newMonitor) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/monitors_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/monitors_test.go new file mode 100644 index 000000000000..93688fdb2e1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/monitors_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking loadbalancer monitors + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + 
"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" +) + +func TestMonitorsList(t *testing.T) { + client, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + allPages, err := monitors.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list monitors: %v", err) + } + + allMonitors, err := monitors.ExtractMonitors(allPages) + if err != nil { + t.Fatalf("Unable to extract monitors: %v", err) + } + + for _, monitor := range allMonitors { + tools.PrintResource(t, monitor) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/pkg.go new file mode 100644 index 000000000000..5ec3cc8e833b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/pools_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/pools_test.go new file mode 100644 index 000000000000..f7ec2a4ac491 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/loadbalancer/v2/pools_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking loadbalancer pools + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" +) + +func TestPoolsList(t *testing.T) { + client, err := clients.NewLoadBalancerV2Client() + if err != nil { + t.Fatalf("Unable to create a loadbalancer client: %v", err) + } + + allPages, err := pools.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list pools: %v", err) + } + + allPools, err := pools.ExtractPools(allPages) + if err != nil { + t.Fatalf("Unable to extract pools: %v", err) + } + + for _, pool := range allPools { + tools.PrintResource(t, pool) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/claims_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/claims_test.go new file mode 100644 index 000000000000..4ffb9229e9ba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/claims_test.go @@ -0,0 +1,63 @@ +// +build acceptance messaging claims + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/messaging/v2/claims" +) + +func TestCRUDClaim(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734c" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + + client, err = clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + for i := 0; i < 3; i++ { + CreateMessage(t, client, createdQueueName) + } + + clientID = "3381af92-2b9e-11e3-b191-7186130073dd" + claimedMessages, err := CreateClaim(t, client, createdQueueName) + claimIDs, _ := ExtractIDs(claimedMessages) + + 
tools.PrintResource(t, claimedMessages) + + updateOpts := claims.UpdateOpts{ + TTL: 600, + Grace: 500, + } + + for _, claimID := range claimIDs { + t.Logf("Attempting to update claim: %s", claimID) + updateErr := claims.Update(client, createdQueueName, claimID, updateOpts).ExtractErr() + + if updateErr != nil { + t.Fatalf("Unable to update claim %s: %v", claimID, err) + } else { + t.Logf("Successfully updated claim: %s", claimID) + } + + updatedClaim, getErr := GetClaim(t, client, createdQueueName, claimID) + if getErr != nil { + t.Fatalf("Unable to retrieve claim %s: %v", claimID, getErr) + } + + tools.PrintResource(t, updatedClaim) + DeleteClaim(t, client, createdQueueName, claimID) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/message_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/message_test.go new file mode 100644 index 000000000000..f3c558116e02 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/message_test.go @@ -0,0 +1,301 @@ +// +build acceptance messaging messages + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/messaging/v2/messages" + "github.com/gophercloud/gophercloud/pagination" +) + +func TestListMessages(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-718613007343" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + for i := 0; i < 3; i++ { + CreateMessage(t, client, createdQueueName) + } + + // Use a different client/clientID in order to see messages on the Queue + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + client, err = clients.NewMessagingV2Client(clientID) + + listOpts := messages.ListOpts{} + + pager := messages.List(client, createdQueueName, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, err := messages.ExtractMessages(page) + if err != nil { + t.Fatalf("Unable to extract messages: %v", err) + } + + for _, message := range allMessages { + tools.PrintResource(t, message) + } + + return true, nil + }) +} + +func TestCreateMessages(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734c" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + CreateMessage(t, client, createdQueueName) +} + +func TestGetMessages(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-718613007343" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + CreateMessage(t, client, createdQueueName) + CreateMessage(t, client, createdQueueName) + + // Use a different client/clientID in order to see messages on the Queue + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + client, err = clients.NewMessagingV2Client(clientID) + + listOpts := messages.ListOpts{} + + var messageIDs []string + + pager := messages.List(client, createdQueueName, listOpts) + err = 
pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, err := messages.ExtractMessages(page) + if err != nil { + t.Fatalf("Unable to extract messages: %v", err) + } + + for _, message := range allMessages { + messageIDs = append(messageIDs, message.ID) + } + + return true, nil + }) + + getMessageOpts := messages.GetMessagesOpts{ + IDs: messageIDs, + } + t.Logf("Attempting to get messages from queue %s with ids: %v", createdQueueName, messageIDs) + messagesList, err := messages.GetMessages(client, createdQueueName, getMessageOpts).Extract() + if err != nil { + t.Fatalf("Unable to get messages from queue: %s", createdQueueName) + } + + tools.PrintResource(t, messagesList) +} + +func TestGetMessage(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-718613007343" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + CreateMessage(t, client, createdQueueName) + + // Use a different client/clientID in order to see messages on the Queue + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + client, err = clients.NewMessagingV2Client(clientID) + + listOpts := messages.ListOpts{} + + var messageIDs []string + + pager := messages.List(client, createdQueueName, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, err := messages.ExtractMessages(page) + if err != nil { + t.Fatalf("Unable to extract messages: %v", err) + } + + for _, message := range allMessages { + messageIDs = append(messageIDs, message.ID) + } + + return true, nil + }) + + for _, messageID := range messageIDs { + t.Logf("Attempting to get message from queue %s: %s", createdQueueName, messageID) + message, getErr := messages.Get(client, createdQueueName, messageID).Extract() + if getErr != nil { + t.Fatalf("Unable to get message from queue %s: %s", createdQueueName, messageID) + } + tools.PrintResource(t, message) + } +} + +func TestDeleteMessagesIDs(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-718613007343" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + CreateMessage(t, client, createdQueueName) + CreateMessage(t, client, createdQueueName) + + // Use a different client/clientID in order to see messages on the Queue + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + + client, err = clients.NewMessagingV2Client(clientID) + + listOpts := messages.ListOpts{} + + var messageIDs []string + + pager := messages.List(client, createdQueueName, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, err := messages.ExtractMessages(page) + if err != nil { + t.Fatalf("Unable to extract messages: %v", err) + } + + for _, message := range allMessages { + messageIDs = append(messageIDs, message.ID) + tools.PrintResource(t, message) + } + + return true, nil + }) + + deleteOpts := messages.DeleteMessagesOpts{ + IDs: messageIDs, + } + + t.Logf("Attempting to delete messages: %v", messageIDs) + deleteErr := messages.DeleteMessages(client, createdQueueName, deleteOpts).ExtractErr() + if deleteErr != nil { + t.Fatalf("Unable to delete messages: %v", deleteErr) + } + + t.Logf("Attempting to list messages.") + messageList, err := ListMessages(t, client, 
createdQueueName) + + if len(messageList) > 0 { + t.Fatalf("Did not delete all specified messages in the queue.") + } +} + +func TestDeleteMessagesPop(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-718613007343" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + for i := 0; i < 5; i++ { + CreateMessage(t, client, createdQueueName) + } + + // Use a different client/clientID in order to see messages on the Queue + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + + client, err = clients.NewMessagingV2Client(clientID) + + messageList, err := ListMessages(t, client, createdQueueName) + + messagesNumber := len(messageList) + popNumber := 3 + + PopOpts := messages.PopMessagesOpts{ + Pop: popNumber, + } + + t.Logf("Attempting to Pop last %v messages.", popNumber) + popMessages, deleteErr := messages.PopMessages(client, createdQueueName, PopOpts).Extract() + if deleteErr != nil { + t.Fatalf("Unable to Pop messages: %v", deleteErr) + } + + tools.PrintResource(t, popMessages) + + messageList, err = ListMessages(t, client, createdQueueName) + if len(messageList) != messagesNumber-popNumber { + t.Fatalf("Unable to Pop specified number of messages.") + } +} + +func TestDeleteMessage(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-718613007343" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + CreateMessage(t, client, createdQueueName) + + // Use a different client/clientID in order to see messages on the Queue + clientID = "3381af92-2b9e-11e3-b191-71861300734d" + client, err = clients.NewMessagingV2Client(clientID) + + listOpts := messages.ListOpts{} + + var messageIDs []string + + pager := messages.List(client, createdQueueName, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, err := messages.ExtractMessages(page) + if err != nil { + t.Fatalf("Unable to extract messages: %v", err) + } + + for _, message := range allMessages { + messageIDs = append(messageIDs, message.ID) + } + + return true, nil + }) + + for _, messageID := range messageIDs { + t.Logf("Attempting to delete message from queue %s: %s", createdQueueName, messageID) + deleteOpts := messages.DeleteOpts{} + deleteErr := messages.Delete(client, createdQueueName, messageID, deleteOpts).ExtractErr() + if deleteErr != nil { + t.Fatalf("Unable to delete message from queue %s: %s", createdQueueName, messageID) + } else { + t.Logf("Successfully deleted message: %s", messageID) + } + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/messaging.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/messaging.go new file mode 100644 index 000000000000..f782770b704e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/messaging.go @@ -0,0 +1,173 @@ +package v2 + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/messaging/v2/claims" + "github.com/gophercloud/gophercloud/openstack/messaging/v2/messages" + 
"github.com/gophercloud/gophercloud/openstack/messaging/v2/queues" + "github.com/gophercloud/gophercloud/pagination" +) + +func CreateQueue(t *testing.T, client *gophercloud.ServiceClient) (string, error) { + queueName := tools.RandomString("ACPTTEST", 5) + + t.Logf("Attempting to create Queue: %s", queueName) + + createOpts := queues.CreateOpts{ + QueueName: queueName, + MaxMessagesPostSize: 262143, + DefaultMessageTTL: 3700, + DefaultMessageDelay: 25, + DeadLetterQueueMessagesTTL: 3500, + MaxClaimCount: 10, + Extra: map[string]interface{}{"description": "Test Queue for Gophercloud acceptance tests."}, + } + + createErr := queues.Create(client, createOpts).ExtractErr() + if createErr != nil { + t.Fatalf("Unable to create Queue: %v", createErr) + } + + GetQueue(t, client, queueName) + + t.Logf("Created Queue: %s", queueName) + return queueName, nil +} + +func DeleteQueue(t *testing.T, client *gophercloud.ServiceClient, queueName string) { + t.Logf("Attempting to delete Queue: %s", queueName) + err := queues.Delete(client, queueName).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete Queue %s: %v", queueName, err) + } + + t.Logf("Deleted Queue: %s", queueName) +} + +func GetQueue(t *testing.T, client *gophercloud.ServiceClient, queueName string) (queues.QueueDetails, error) { + t.Logf("Attempting to get Queue: %s", queueName) + queue, err := queues.Get(client, queueName).Extract() + if err != nil { + t.Fatalf("Unable to get Queue %s: %v", queueName, err) + } + return queue, nil +} + +func CreateShare(t *testing.T, client *gophercloud.ServiceClient, queueName string) (queues.QueueShare, error) { + t.Logf("Attempting to create share for queue: %s", queueName) + + shareOpts := queues.ShareOpts{ + Paths: []queues.SharePath{queues.PathMessages}, + Methods: []queues.ShareMethod{queues.MethodPost}, + } + + share, err := queues.Share(client, queueName, shareOpts).Extract() + + return share, err +} + +func CreateMessage(t *testing.T, client *gophercloud.ServiceClient, queueName string) (messages.ResourceList, error) { + t.Logf("Attempting to add message to Queue: %s", queueName) + createOpts := messages.BatchCreateOpts{ + messages.CreateOpts{ + TTL: 300, + Body: map[string]interface{}{"Key": tools.RandomString("ACPTTEST", 8)}, + }, + } + + resource, err := messages.Create(client, queueName, createOpts).Extract() + if err != nil { + t.Fatalf("Unable to add message to queue %s: %v", queueName, err) + } else { + t.Logf("Successfully added message to queue: %s", queueName) + } + + return resource, err +} + +func ListMessages(t *testing.T, client *gophercloud.ServiceClient, queueName string) ([]messages.Message, error) { + listOpts := messages.ListOpts{} + var allMessages []messages.Message + var listErr error + + t.Logf("Attempting to list messages on queue: %s", queueName) + pager := messages.List(client, queueName, listOpts) + err := pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, listErr = messages.ExtractMessages(page) + if listErr != nil { + t.Fatalf("Unable to extract messages: %v", listErr) + } + + for _, message := range allMessages { + tools.PrintResource(t, message) + } + + return true, nil + }) + return allMessages, err +} + +func CreateClaim(t *testing.T, client *gophercloud.ServiceClient, queueName string) ([]claims.Messages, error) { + createOpts := claims.CreateOpts{} + + t.Logf("Attempting to create claim on queue: %s", queueName) + claimedMessages, err := claims.Create(client, queueName, createOpts).Extract() + tools.PrintResource(t, claimedMessages) + 
if err != nil { + t.Fatalf("Unable to create claim: %v", err) + } + + return claimedMessages, err +} + +func GetClaim(t *testing.T, client *gophercloud.ServiceClient, queueName string, claimID string) (*claims.Claim, error) { + t.Logf("Attempting to get claim: %s", claimID) + claim, err := claims.Get(client, queueName, claimID).Extract() + if err != nil { + t.Fatalf("Unable to get claim: %s", claimID) + } + + return claim, err +} + +func DeleteClaim(t *testing.T, client *gophercloud.ServiceClient, queueName string, claimID string) error { + t.Logf("Attempting to delete claim: %s", claimID) + err := claims.Delete(client, queueName, claimID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete claim: %s", claimID) + } + t.Logf("Sucessfully deleted claim: %s", claimID) + + return err +} + +func ExtractIDs(claim []claims.Messages) ([]string, []string) { + var claimIDs []string + var messageID []string + + for _, msg := range claim { + parts := strings.Split(msg.Href, "?claim_id=") + if len(parts) == 2 { + pieces := strings.Split(parts[0], "/") + if len(pieces) > 0 { + messageID = append(messageID, pieces[len(pieces)-1]) + } + claimIDs = append(claimIDs, parts[1]) + } + } + encountered := map[string]bool{} + for v := range claimIDs { + encountered[claimIDs[v]] = true + } + + var uniqueClaimIDs []string + + for key := range encountered { + uniqueClaimIDs = append(uniqueClaimIDs, key) + } + return uniqueClaimIDs, messageID +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/queue_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/queue_test.go new file mode 100644 index 000000000000..166f46e83dce --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/messaging/v2/queue_test.go @@ -0,0 +1,154 @@ +// +build acceptance messaging queues + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/messaging/v2/queues" + "github.com/gophercloud/gophercloud/pagination" +) + +func TestCRUDQueues(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734d" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + createdQueue, err := queues.Get(client, createdQueueName).Extract() + + tools.PrintResource(t, createdQueue) + tools.PrintResource(t, createdQueue.Extra) + + updateOpts := queues.BatchUpdateOpts{ + queues.UpdateOpts{ + Op: "replace", + Path: "/metadata/_max_claim_count", + Value: 15, + }, + queues.UpdateOpts{ + Op: "replace", + Path: "/metadata/description", + Value: "Updated description for queues acceptance test.", + }, + } + + t.Logf("Attempting to update Queue: %s", createdQueueName) + updateResult, updateErr := queues.Update(client, createdQueueName, updateOpts).Extract() + if updateErr != nil { + t.Fatalf("Unable to update Queue %s: %v", createdQueueName, updateErr) + } + + updatedQueue, err := GetQueue(t, client, createdQueueName) + + tools.PrintResource(t, updateResult) + tools.PrintResource(t, updatedQueue) + tools.PrintResource(t, updatedQueue.Extra) +} + +func TestListQueues(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734d" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to 
create a messaging service client: %v", err) + } + + firstQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, firstQueueName) + + secondQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, secondQueueName) + + listOpts := queues.ListOpts{ + Limit: 10, + Detailed: true, + } + + pager := queues.List(client, listOpts) + err = pager.EachPage(func(page pagination.Page) (bool, error) { + allQueues, err := queues.ExtractQueues(page) + if err != nil { + t.Fatalf("Unable to extract Queues: %v", err) + } + + for _, queue := range allQueues { + tools.PrintResource(t, queue) + } + + return true, nil + }) +} + +func TestStatQueue(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734c" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + createdQueueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, createdQueueName) + + queueStats, err := queues.GetStats(client, createdQueueName).Extract() + if err != nil { + t.Fatalf("Unable to stat queue: %v", err) + } + + tools.PrintResource(t, queueStats) +} + +func TestShare(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734c" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + queueName, err := CreateQueue(t, client) + if err != nil { + t.Logf("Unable to create queue for share.") + } + defer DeleteQueue(t, client, queueName) + + t.Logf("Attempting to create share for queue: %s", queueName) + share, shareErr := CreateShare(t, client, queueName) + if shareErr != nil { + t.Fatalf("Unable to create share: %v", shareErr) + } + + tools.PrintResource(t, share) +} + +func TestPurge(t *testing.T) { + clientID := "3381af92-2b9e-11e3-b191-71861300734c" + + client, err := clients.NewMessagingV2Client(clientID) + if err != nil { + t.Fatalf("Unable to create a messaging service client: %v", err) + } + + queueName, err := CreateQueue(t, client) + defer DeleteQueue(t, client, queueName) + + purgeOpts := queues.PurgeOpts{ + ResourceTypes: []queues.PurgeResource{ + queues.ResourceMessages, + }, + } + + t.Logf("Attempting to purge queue: %s", queueName) + purgeErr := queues.Purge(client, queueName, purgeOpts).ExtractErr() + if purgeErr != nil { + t.Fatalf("Unable to purge queue %s: %v", queueName, purgeErr) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go new file mode 100644 index 000000000000..c6f8f261b35e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/apiversion_test.go @@ -0,0 +1,53 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions" +) + +func TestAPIVersionsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := apiversions.ListVersions(client).AllPages() + if err != nil { + t.Fatalf("Unable to list api versions: %v", err) + } + + allAPIVersions, err := apiversions.ExtractAPIVersions(allPages) + if err != nil { + t.Fatalf("Unable to extract api versions: %v", err) + } + + 
for _, apiVersion := range allAPIVersions { + tools.PrintResource(t, apiVersion) + } +} + +func TestAPIResourcesList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := apiversions.ListVersionResources(client, "v2.0").AllPages() + if err != nil { + t.Fatalf("Unable to list api version reosources: %v", err) + } + + allVersionResources, err := apiversions.ExtractVersionResources(allPages) + if err != nil { + t.Fatalf("Unable to extract version resources: %v", err) + } + + for _, versionResource := range allVersionResources { + tools.PrintResource(t, versionResource) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extension_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extension_test.go new file mode 100644 index 000000000000..5609e8526168 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extension_test.go @@ -0,0 +1,46 @@ +// +build acceptance networking extensions + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/common/extensions" +) + +func TestExtensionsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := extensions.List(client).AllPages() + if err != nil { + t.Fatalf("Unable to list extensions: %v", err) + } + + allExtensions, err := extensions.ExtractExtensions(allPages) + if err != nil { + t.Fatalf("Unable to extract extensions: %v", err) + } + + for _, extension := range allExtensions { + tools.PrintResource(t, extension) + } +} + +func TestExtensionGet(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + extension, err := extensions.Get(client, "router").Extract() + if err != nil { + t.Fatalf("Unable to get extension port-security: %v", err) + } + + tools.PrintResource(t, extension) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/extensions.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/extensions.go new file mode 100644 index 000000000000..06068322e133 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/extensions.go @@ -0,0 +1,142 @@ +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// CreateExternalNetwork will create an external network. An error will be +// returned if the creation failed. 
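The listing tests above exercise both pagination styles gophercloud offers: gathering everything with AllPages and extracting once, or walking pages with an EachPage callback. A minimal, self-contained sketch of the two styles against the networking extensions API used above; it is illustrative only, not part of the vendored files, and assumes the usual OS_* environment variables so that clients.NewNetworkV2Client can authenticate.

package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud/acceptance/clients"
	"github.com/gophercloud/gophercloud/openstack/common/extensions"
	"github.com/gophercloud/gophercloud/pagination"
)

func main() {
	// NewNetworkV2Client reads the usual OS_* environment variables.
	client, err := clients.NewNetworkV2Client()
	if err != nil {
		panic(err)
	}

	// Style 1: collect every page up front, then extract once.
	allPages, err := extensions.List(client).AllPages()
	if err != nil {
		panic(err)
	}
	allExtensions, err := extensions.ExtractExtensions(allPages)
	if err != nil {
		panic(err)
	}
	fmt.Printf("found %d extensions\n", len(allExtensions))

	// Style 2: visit page by page; returning false stops the iteration early.
	err = extensions.List(client).EachPage(func(page pagination.Page) (bool, error) {
		exts, err := extensions.ExtractExtensions(page)
		if err != nil {
			return false, err
		}
		for _, ext := range exts {
			fmt.Printf("%+v\n", ext)
		}
		return true, nil
	})
	if err != nil {
		panic(err)
	}
}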
+func CreateExternalNetwork(t *testing.T, client *gophercloud.ServiceClient) (*networks.Network, error) { + networkName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create external network: %s", networkName) + + adminStateUp := true + isExternal := true + + networkCreateOpts := networks.CreateOpts{ + Name: networkName, + AdminStateUp: &adminStateUp, + } + + createOpts := external.CreateOptsExt{ + CreateOptsBuilder: networkCreateOpts, + External: &isExternal, + } + + network, err := networks.Create(client, createOpts).Extract() + if err != nil { + return network, err + } + + t.Logf("Created external network: %s", networkName) + + return network, nil +} + +// CreatePortWithSecurityGroup will create a port with a security group +// attached. An error will be returned if the port could not be created. +func CreatePortWithSecurityGroup(t *testing.T, client *gophercloud.ServiceClient, networkID, subnetID, secGroupID string) (*ports.Port, error) { + portName := tools.RandomString("TESTACC-", 8) + iFalse := false + + t.Logf("Attempting to create port: %s", portName) + + createOpts := ports.CreateOpts{ + NetworkID: networkID, + Name: portName, + AdminStateUp: &iFalse, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + SecurityGroups: &[]string{secGroupID}, + } + + port, err := ports.Create(client, createOpts).Extract() + if err != nil { + return port, err + } + + t.Logf("Successfully created port: %s", portName) + + return port, nil +} + +// CreateSecurityGroup will create a security group with a random name. +// An error will be returned if one was failed to be created. +func CreateSecurityGroup(t *testing.T, client *gophercloud.ServiceClient) (*groups.SecGroup, error) { + secGroupName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create security group: %s", secGroupName) + + createOpts := groups.CreateOpts{ + Name: secGroupName, + } + + secGroup, err := groups.Create(client, createOpts).Extract() + if err != nil { + return secGroup, err + } + + t.Logf("Created security group: %s", secGroup.ID) + + return secGroup, nil +} + +// CreateSecurityGroupRule will create a security group rule with a random name +// and random port between 80 and 99. +// An error will be returned if one was failed to be created. +func CreateSecurityGroupRule(t *testing.T, client *gophercloud.ServiceClient, secGroupID string) (*rules.SecGroupRule, error) { + t.Logf("Attempting to create security group rule in group: %s", secGroupID) + + fromPort := tools.RandomInt(80, 89) + toPort := tools.RandomInt(90, 99) + + createOpts := rules.CreateOpts{ + Direction: "ingress", + EtherType: "IPv4", + SecGroupID: secGroupID, + PortRangeMin: fromPort, + PortRangeMax: toPort, + Protocol: rules.ProtocolTCP, + } + + rule, err := rules.Create(client, createOpts).Extract() + if err != nil { + return rule, err + } + + t.Logf("Created security group rule: %s", rule.ID) + + return rule, nil +} + +// DeleteSecurityGroup will delete a security group of a specified ID. +// A fatal error will occur if the deletion failed. This works best as a +// deferred function +func DeleteSecurityGroup(t *testing.T, client *gophercloud.ServiceClient, secGroupID string) { + t.Logf("Attempting to delete security group: %s", secGroupID) + + err := groups.Delete(client, secGroupID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete security group: %v", err) + } +} + +// DeleteSecurityGroupRule will delete a security group rule of a specified ID. +// A fatal error will occur if the deletion failed. 
This works best as a +// deferred function +func DeleteSecurityGroupRule(t *testing.T, client *gophercloud.ServiceClient, ruleID string) { + t.Logf("Attempting to delete security group rule: %s", ruleID) + + err := rules.Delete(client, ruleID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete security group rule: %v", err) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go new file mode 100644 index 000000000000..89d378ee7c6d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/firewall_test.go @@ -0,0 +1,212 @@ +// +build acceptance networking fwaas + +package fwaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + layer3 "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" +) + +func TestFirewallList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := firewalls.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list firewalls: %v", err) + } + + allFirewalls, err := firewalls.ExtractFirewalls(allPages) + if err != nil { + t.Fatalf("Unable to extract firewalls: %v", err) + } + + for _, firewall := range allFirewalls { + tools.PrintResource(t, firewall) + } +} + +func TestFirewallCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + router, err := layer3.CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer layer3.DeleteRouter(t, client, router.ID) + + rule, err := CreateRule(t, client) + if err != nil { + t.Fatalf("Unable to create rule: %v", err) + } + defer DeleteRule(t, client, rule.ID) + + tools.PrintResource(t, rule) + + policy, err := CreatePolicy(t, client, rule.ID) + if err != nil { + t.Fatalf("Unable to create policy: %v", err) + } + defer DeletePolicy(t, client, policy.ID) + + tools.PrintResource(t, policy) + + firewall, err := CreateFirewall(t, client, policy.ID) + if err != nil { + t.Fatalf("Unable to create firewall: %v", err) + } + defer DeleteFirewall(t, client, firewall.ID) + + tools.PrintResource(t, firewall) + + updateOpts := firewalls.UpdateOpts{ + PolicyID: policy.ID, + Description: "Some firewall description", + } + + _, err = firewalls.Update(client, firewall.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update firewall: %v", err) + } + + newFirewall, err := firewalls.Get(client, firewall.ID).Extract() + if err != nil { + t.Fatalf("Unable to get firewall: %v", err) + } + + tools.PrintResource(t, newFirewall) +} + +func TestFirewallCRUDRouter(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + router, err := layer3.CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer layer3.DeleteRouter(t, client, router.ID) + + rule, err := CreateRule(t, client) + if err != nil 
{ + t.Fatalf("Unable to create rule: %v", err) + } + defer DeleteRule(t, client, rule.ID) + + tools.PrintResource(t, rule) + + policy, err := CreatePolicy(t, client, rule.ID) + if err != nil { + t.Fatalf("Unable to create policy: %v", err) + } + defer DeletePolicy(t, client, policy.ID) + + tools.PrintResource(t, policy) + + firewall, err := CreateFirewallOnRouter(t, client, policy.ID, router.ID) + if err != nil { + t.Fatalf("Unable to create firewall: %v", err) + } + defer DeleteFirewall(t, client, firewall.ID) + + tools.PrintResource(t, firewall) + + router2, err := layer3.CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer layer3.DeleteRouter(t, client, router2.ID) + + firewallUpdateOpts := firewalls.UpdateOpts{ + PolicyID: policy.ID, + Description: "Some firewall description", + } + + updateOpts := routerinsertion.UpdateOptsExt{ + firewallUpdateOpts, + []string{router2.ID}, + } + + _, err = firewalls.Update(client, firewall.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update firewall: %v", err) + } + + newFirewall, err := firewalls.Get(client, firewall.ID).Extract() + if err != nil { + t.Fatalf("Unable to get firewall: %v", err) + } + + tools.PrintResource(t, newFirewall) +} + +func TestFirewallCRUDRemoveRouter(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + router, err := layer3.CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer layer3.DeleteRouter(t, client, router.ID) + + rule, err := CreateRule(t, client) + if err != nil { + t.Fatalf("Unable to create rule: %v", err) + } + defer DeleteRule(t, client, rule.ID) + + tools.PrintResource(t, rule) + + policy, err := CreatePolicy(t, client, rule.ID) + if err != nil { + t.Fatalf("Unable to create policy: %v", err) + } + defer DeletePolicy(t, client, policy.ID) + + tools.PrintResource(t, policy) + + firewall, err := CreateFirewallOnRouter(t, client, policy.ID, router.ID) + if err != nil { + t.Fatalf("Unable to create firewall: %v", err) + } + defer DeleteFirewall(t, client, firewall.ID) + + tools.PrintResource(t, firewall) + + firewallUpdateOpts := firewalls.UpdateOpts{ + PolicyID: policy.ID, + Description: "Some firewall description", + } + + updateOpts := routerinsertion.UpdateOptsExt{ + firewallUpdateOpts, + []string{}, + } + + _, err = firewalls.Update(client, firewall.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update firewall: %v", err) + } + + newFirewall, err := firewalls.Get(client, firewall.ID).Extract() + if err != nil { + t.Fatalf("Unable to get firewall: %v", err) + } + + tools.PrintResource(t, newFirewall) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/fwaas.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/fwaas.go new file mode 100644 index 000000000000..83aa1a400fc0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/fwaas.go @@ -0,0 +1,203 @@ +package fwaas + +import ( + "fmt" + "strconv" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + 
"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" +) + +// CreateFirewall will create a Firewaill with a random name and a specified +// policy ID. An error will be returned if the firewall could not be created. +func CreateFirewall(t *testing.T, client *gophercloud.ServiceClient, policyID string) (*firewalls.Firewall, error) { + firewallName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create firewall %s", firewallName) + + iTrue := true + createOpts := firewalls.CreateOpts{ + Name: firewallName, + PolicyID: policyID, + AdminStateUp: &iTrue, + } + + firewall, err := firewalls.Create(client, createOpts).Extract() + if err != nil { + return firewall, err + } + + t.Logf("Waiting for firewall to become active.") + if err := WaitForFirewallState(client, firewall.ID, "ACTIVE", 60); err != nil { + return firewall, err + } + + t.Logf("Successfully created firewall %s", firewallName) + + return firewall, nil +} + +// CreateFirewallOnRouter will create a Firewall with a random name and a +// specified policy ID attached to a specified Router. An error will be +// returned if the firewall could not be created. +func CreateFirewallOnRouter(t *testing.T, client *gophercloud.ServiceClient, policyID string, routerID string) (*firewalls.Firewall, error) { + firewallName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create firewall %s", firewallName) + + firewallCreateOpts := firewalls.CreateOpts{ + Name: firewallName, + PolicyID: policyID, + } + + createOpts := routerinsertion.CreateOptsExt{ + CreateOptsBuilder: firewallCreateOpts, + RouterIDs: []string{routerID}, + } + + firewall, err := firewalls.Create(client, createOpts).Extract() + if err != nil { + return firewall, err + } + + t.Logf("Waiting for firewall to become active.") + if err := WaitForFirewallState(client, firewall.ID, "ACTIVE", 60); err != nil { + return firewall, err + } + + t.Logf("Successfully created firewall %s", firewallName) + + return firewall, nil +} + +// CreatePolicy will create a Firewall Policy with a random name and given +// rule. An error will be returned if the rule could not be created. +func CreatePolicy(t *testing.T, client *gophercloud.ServiceClient, ruleID string) (*policies.Policy, error) { + policyName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create policy %s", policyName) + + createOpts := policies.CreateOpts{ + Name: policyName, + Rules: []string{ + ruleID, + }, + } + + policy, err := policies.Create(client, createOpts).Extract() + if err != nil { + return policy, err + } + + t.Logf("Successfully created policy %s", policyName) + + return policy, nil +} + +// CreateRule will create a Firewall Rule with a random source address and +//source port, destination address and port. An error will be returned if +// the rule could not be created. 
+func CreateRule(t *testing.T, client *gophercloud.ServiceClient) (*rules.Rule, error) { + ruleName := tools.RandomString("TESTACC-", 8) + sourceAddress := fmt.Sprintf("192.168.1.%d", tools.RandomInt(1, 100)) + sourcePort := strconv.Itoa(tools.RandomInt(1, 100)) + destinationAddress := fmt.Sprintf("192.168.2.%d", tools.RandomInt(1, 100)) + destinationPort := strconv.Itoa(tools.RandomInt(1, 100)) + + t.Logf("Attempting to create rule %s with source %s:%s and destination %s:%s", + ruleName, sourceAddress, sourcePort, destinationAddress, destinationPort) + + createOpts := rules.CreateOpts{ + Name: ruleName, + Protocol: rules.ProtocolTCP, + Action: "allow", + SourceIPAddress: sourceAddress, + SourcePort: sourcePort, + DestinationIPAddress: destinationAddress, + DestinationPort: destinationPort, + } + + rule, err := rules.Create(client, createOpts).Extract() + if err != nil { + return rule, err + } + + t.Logf("Rule %s successfully created", ruleName) + + return rule, nil +} + +// DeleteFirewall will delete a firewall with a specified ID. A fatal error +// will occur if the delete was not successful. This works best when used as +// a deferred function. +func DeleteFirewall(t *testing.T, client *gophercloud.ServiceClient, firewallID string) { + t.Logf("Attempting to delete firewall: %s", firewallID) + + err := firewalls.Delete(client, firewallID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete firewall %s: %v", firewallID, err) + } + + t.Logf("Waiting for firewall to delete.") + if err := WaitForFirewallState(client, firewallID, "DELETED", 60); err != nil { + t.Logf("Unable to delete firewall: %s", firewallID) + } + + t.Logf("Firewall deleted: %s", firewallID) +} + +// DeletePolicy will delete a policy with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeletePolicy(t *testing.T, client *gophercloud.ServiceClient, policyID string) { + t.Logf("Attempting to delete policy: %s", policyID) + + err := policies.Delete(client, policyID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete policy %s: %v", policyID, err) + } + + t.Logf("Deleted policy: %s", policyID) +} + +// DeleteRule will delete a rule with a specified ID. A fatal error will occur +// if the delete was not successful. This works best when used as a deferred +// function. +func DeleteRule(t *testing.T, client *gophercloud.ServiceClient, ruleID string) { + t.Logf("Attempting to delete rule: %s", ruleID) + + err := rules.Delete(client, ruleID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete rule %s: %v", ruleID, err) + } + + t.Logf("Deleted rule: %s", ruleID) +} + +// WaitForFirewallState will wait until a firewall reaches a given state. 
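WaitForFirewallState below is built on gophercloud.WaitFor, which simply re-runs a predicate until it returns true, returns an error, or the time budget runs out; when the target state is DELETED, a 404 from the Get call counts as success. A stripped-down sketch of that deletion-polling idiom, for illustration only:

package sketch

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls"
)

// waitForFirewallGone polls until Get returns a 404, which the acceptance
// helpers treat as "the firewall is deleted". WaitFor returns an error once
// secs seconds have elapsed without the predicate succeeding.
func waitForFirewallGone(client *gophercloud.ServiceClient, firewallID string, secs int) error {
	return gophercloud.WaitFor(secs, func() (bool, error) {
		_, err := firewalls.Get(client, firewallID).Extract()
		if err != nil {
			if _, ok := err.(gophercloud.ErrDefault404); ok {
				return true, nil // gone
			}
			return false, err // real failure: stop polling
		}
		return false, nil // still present: keep polling
	})
}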
+func WaitForFirewallState(client *gophercloud.ServiceClient, firewallID, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := firewalls.Get(client, firewallID).Extract() + if err != nil { + if httpStatus, ok := err.(gophercloud.ErrDefault404); ok { + if httpStatus.Actual == 404 { + if status == "DELETED" { + return true, nil + } + } + } + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go new file mode 100644 index 000000000000..206bf3313a8d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/pkg.go @@ -0,0 +1 @@ +package fwaas diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go new file mode 100644 index 000000000000..3220d821a3c5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/policy_test.go @@ -0,0 +1,71 @@ +// +build acceptance networking fwaas + +package fwaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" +) + +func TestPolicyList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := policies.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list policies: %v", err) + } + + allPolicies, err := policies.ExtractPolicies(allPages) + if err != nil { + t.Fatalf("Unable to extract policies: %v", err) + } + + for _, policy := range allPolicies { + tools.PrintResource(t, policy) + } +} + +func TestPolicyCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + rule, err := CreateRule(t, client) + if err != nil { + t.Fatalf("Unable to create rule: %v", err) + } + defer DeleteRule(t, client, rule.ID) + + tools.PrintResource(t, rule) + + policy, err := CreatePolicy(t, client, rule.ID) + if err != nil { + t.Fatalf("Unable to create policy: %v", err) + } + defer DeletePolicy(t, client, policy.ID) + + tools.PrintResource(t, policy) + + updateOpts := policies.UpdateOpts{ + Description: "Some policy description", + } + + _, err = policies.Update(client, policy.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update policy: %v", err) + } + + newPolicy, err := policies.Get(client, policy.ID).Extract() + if err != nil { + t.Fatalf("Unable to get policy: %v", err) + } + + tools.PrintResource(t, newPolicy) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go new file mode 100644 index 000000000000..4521a60b81e6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/fwaas/rule_test.go @@ -0,0 +1,64 @@ +// +build acceptance networking fwaas 
+ +package fwaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" +) + +func TestRuleList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := rules.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list rules: %v", err) + } + + allRules, err := rules.ExtractRules(allPages) + if err != nil { + t.Fatalf("Unable to extract rules: %v", err) + } + + for _, rule := range allRules { + tools.PrintResource(t, rule) + } +} + +func TestRuleCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + rule, err := CreateRule(t, client) + if err != nil { + t.Fatalf("Unable to create rule: %v", err) + } + defer DeleteRule(t, client, rule.ID) + + tools.PrintResource(t, rule) + + ruleDescription := "Some rule description" + updateOpts := rules.UpdateOpts{ + Description: &ruleDescription, + } + + _, err = rules.Update(client, rule.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update rule: %v", err) + } + + newRule, err := rules.Get(client, rule.ID).Extract() + if err != nil { + t.Fatalf("Unable to get rule: %v", err) + } + + tools.PrintResource(t, newRule) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/floatingips_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/floatingips_test.go new file mode 100644 index 000000000000..3d2073c1cda8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/floatingips_test.go @@ -0,0 +1,147 @@ +// +build acceptance networking layer3 floatingips + +package layer3 + +import ( + "os" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +func TestLayer3FloatingIPsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + listOpts := floatingips.ListOpts{ + Status: "DOWN", + } + allPages, err := floatingips.List(client, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list floating IPs: %v", err) + } + + allFIPs, err := floatingips.ExtractFloatingIPs(allPages) + if err != nil { + t.Fatalf("Unable to extract floating IPs: %v", err) + } + + for _, fip := range allFIPs { + tools.PrintResource(t, fip) + } +} + +func TestLayer3FloatingIPsCreateDelete(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatalf("Unable to get choices: %v", err) + } + + netid, err := networks.IDFromName(client, choices.NetworkName) + if err != nil { + t.Fatalf("Unable to find network id: %v", err) + } + + subnet, err := networking.CreateSubnet(t, client, netid) 
+ if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, client, subnet.ID) + + router, err := CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer DeleteRouter(t, client, router.ID) + + port, err := networking.CreatePort(t, client, netid, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + + _, err = CreateRouterInterface(t, client, port.ID, router.ID) + if err != nil { + t.Fatalf("Unable to create router interface: %v", err) + } + defer DeleteRouterInterface(t, client, port.ID, router.ID) + + fip, err := CreateFloatingIP(t, client, choices.ExternalNetworkID, port.ID) + if err != nil { + t.Fatalf("Unable to create floating IP: %v", err) + } + defer DeleteFloatingIP(t, client, fip.ID) + + newFip, err := floatingips.Get(client, fip.ID).Extract() + if err != nil { + t.Fatalf("Unable to get floating ip: %v", err) + } + + tools.PrintResource(t, newFip) + + // Disassociate the floating IP + updateOpts := floatingips.UpdateOpts{ + PortID: nil, + } + + newFip, err = floatingips.Update(client, fip.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to disassociate floating IP: %v", err) + } +} + +func TestLayer3FloatingIPsCreateDeleteBySubnetID(t *testing.T) { + username := os.Getenv("OS_USERNAME") + if username != "admin" { + t.Skip("must be admin to run this test") + } + + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatalf("Unable to get choices: %v", err) + } + + listOpts := subnets.ListOpts{ + NetworkID: choices.ExternalNetworkID, + } + + subnetPages, err := subnets.List(client, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list subnets: %v", err) + } + + allSubnets, err := subnets.ExtractSubnets(subnetPages) + if err != nil { + t.Fatalf("Unable to extract subnets: %v", err) + } + + createOpts := floatingips.CreateOpts{ + FloatingNetworkID: choices.ExternalNetworkID, + SubnetID: allSubnets[0].ID, + } + + fip, err := floatingips.Create(client, createOpts).Extract() + if err != nil { + t.Fatalf("Unable to create floating IP: %v", err) + } + + tools.PrintResource(t, fip) + + DeleteFloatingIP(t, client, fip.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/layer3.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/layer3.go new file mode 100644 index 000000000000..3d017be889a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/layer3.go @@ -0,0 +1,250 @@ +package layer3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// CreateFloatingIP creates a floating IP on a given network and port. An error +// will be returned if the creation failed. 
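TestLayer3FloatingIPsCreateDelete above detaches the floating IP by updating it with a nil PortID; association is the mirror image, passing a pointer to the port ID (the pointer field is what allows nil to mean "detach"). A small sketch, assuming that pointer field and with fipID and portID as placeholders:

package sketch

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips"
)

// reassignFloatingIP points an existing floating IP at a port and then
// detaches it again. fipID and portID are placeholders for real resource IDs.
func reassignFloatingIP(client *gophercloud.ServiceClient, fipID, portID string) error {
	// Associate: PortID is a pointer field, so take the address of the ID.
	if _, err := floatingips.Update(client, fipID, floatingips.UpdateOpts{PortID: &portID}).Extract(); err != nil {
		return err
	}
	// Disassociate: a nil PortID clears the association, as in the test above.
	_, err := floatingips.Update(client, fipID, floatingips.UpdateOpts{PortID: nil}).Extract()
	return err
}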
+func CreateFloatingIP(t *testing.T, client *gophercloud.ServiceClient, networkID, portID string) (*floatingips.FloatingIP, error) { + t.Logf("Attempting to create floating IP on port: %s", portID) + + createOpts := &floatingips.CreateOpts{ + FloatingNetworkID: networkID, + PortID: portID, + } + + floatingIP, err := floatingips.Create(client, createOpts).Extract() + if err != nil { + return floatingIP, err + } + + t.Logf("Created floating IP.") + + return floatingIP, err +} + +// CreateExternalRouter creates a router on the external network. This requires +// the OS_EXTGW_ID environment variable to be set. An error is returned if the +// creation failed. +func CreateExternalRouter(t *testing.T, client *gophercloud.ServiceClient) (*routers.Router, error) { + var router *routers.Router + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + return router, err + } + + routerName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create external router: %s", routerName) + + adminStateUp := true + enableSNAT := false + gatewayInfo := routers.GatewayInfo{ + NetworkID: choices.ExternalNetworkID, + EnableSNAT: &enableSNAT, + } + + createOpts := routers.CreateOpts{ + Name: routerName, + AdminStateUp: &adminStateUp, + GatewayInfo: &gatewayInfo, + } + + router, err = routers.Create(client, createOpts).Extract() + if err != nil { + return router, err + } + + if err := WaitForRouterToCreate(client, router.ID, 60); err != nil { + return router, err + } + + t.Logf("Created router: %s", routerName) + + return router, nil +} + +// CreateRouter creates a router on a specified Network ID. An error will be +// returned if the creation failed. +func CreateRouter(t *testing.T, client *gophercloud.ServiceClient, networkID string) (*routers.Router, error) { + routerName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create router: %s", routerName) + + adminStateUp := true + gatewayInfo := routers.GatewayInfo{ + NetworkID: networkID, + } + + createOpts := routers.CreateOpts{ + Name: routerName, + AdminStateUp: &adminStateUp, + GatewayInfo: &gatewayInfo, + } + + router, err := routers.Create(client, createOpts).Extract() + if err != nil { + return router, err + } + + if err := WaitForRouterToCreate(client, router.ID, 60); err != nil { + return router, err + } + + t.Logf("Created router: %s", routerName) + + return router, nil +} + +// CreateRouterInterface will attach a subnet to a router. An error will be +// returned if the operation fails. +func CreateRouterInterface(t *testing.T, client *gophercloud.ServiceClient, portID, routerID string) (*routers.InterfaceInfo, error) { + t.Logf("Attempting to add port %s to router %s", portID, routerID) + + aiOpts := routers.AddInterfaceOpts{ + PortID: portID, + } + + iface, err := routers.AddInterface(client, routerID, aiOpts).Extract() + if err != nil { + return iface, err + } + + if err := WaitForRouterInterfaceToAttach(client, portID, 60); err != nil { + return iface, err + } + + t.Logf("Successfully added port %s to router %s", portID, routerID) + return iface, nil +} + +// DeleteRouter deletes a router of a specified ID. A fatal error will occur +// if the deletion failed. This works best when used as a deferred function. 
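The adminStateUp := true / AdminStateUp: &adminStateUp pattern above exists because these option fields are pointers: a nil pointer is left out of the request so the server default applies, while a pointer to false is an explicit request. gophercloud also ships ready-made pointers for the common cases, as the LBaaS helpers later in this diff do with gophercloud.Enabled. A short sketch, with externalNetworkID as a placeholder for the OS_EXTGW_ID network:

package sketch

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers"
)

// externalRouterOpts builds router options with an explicit gateway and SNAT
// disabled. externalNetworkID is a placeholder for the OS_EXTGW_ID network.
func externalRouterOpts(externalNetworkID string) routers.CreateOpts {
	enableSNAT := false
	return routers.CreateOpts{
		Name:         "example-router",
		AdminStateUp: gophercloud.Enabled, // predefined *bool for "true"
		GatewayInfo: &routers.GatewayInfo{
			NetworkID:  externalNetworkID,
			EnableSNAT: &enableSNAT, // explicit false, distinct from "not set"
		},
	}
}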
+func DeleteRouter(t *testing.T, client *gophercloud.ServiceClient, routerID string) { + t.Logf("Attempting to delete router: %s", routerID) + + err := routers.Delete(client, routerID).ExtractErr() + if err != nil { + t.Fatalf("Error deleting router: %v", err) + } + + if err := WaitForRouterToDelete(client, routerID, 60); err != nil { + t.Fatalf("Error waiting for router to delete: %v", err) + } + + t.Logf("Deleted router: %s", routerID) +} + +// DeleteRouterInterface will detach a subnet to a router. A fatal error will +// occur if the deletion failed. This works best when used as a deferred +// function. +func DeleteRouterInterface(t *testing.T, client *gophercloud.ServiceClient, portID, routerID string) { + t.Logf("Attempting to detach port %s from router %s", portID, routerID) + + riOpts := routers.RemoveInterfaceOpts{ + PortID: portID, + } + + _, err := routers.RemoveInterface(client, routerID, riOpts).Extract() + if err != nil { + t.Fatalf("Failed to detach port %s from router %s", portID, routerID) + } + + if err := WaitForRouterInterfaceToDetach(client, portID, 60); err != nil { + t.Fatalf("Failed to wait for port %s to detach from router %s", portID, routerID) + } + + t.Logf("Successfully detached port %s from router %s", portID, routerID) +} + +// DeleteFloatingIP deletes a floatingIP of a specified ID. A fatal error will +// occur if the deletion failed. This works best when used as a deferred +// function. +func DeleteFloatingIP(t *testing.T, client *gophercloud.ServiceClient, floatingIPID string) { + t.Logf("Attempting to delete floating IP: %s", floatingIPID) + + err := floatingips.Delete(client, floatingIPID).ExtractErr() + if err != nil { + t.Fatalf("Failed to delete floating IP: %v", err) + } + + t.Logf("Deleted floating IP: %s", floatingIPID) +} + +func WaitForRouterToCreate(client *gophercloud.ServiceClient, routerID string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + r, err := routers.Get(client, routerID).Extract() + if err != nil { + return false, err + } + + if r.Status == "ACTIVE" { + return true, nil + } + + return false, nil + }) +} + +func WaitForRouterToDelete(client *gophercloud.ServiceClient, routerID string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + _, err := routers.Get(client, routerID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return true, nil + } + + return false, err + } + + return false, nil + }) +} + +func WaitForRouterInterfaceToAttach(client *gophercloud.ServiceClient, routerInterfaceID string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + r, err := ports.Get(client, routerInterfaceID).Extract() + if err != nil { + return false, err + } + + if r.Status == "ACTIVE" { + return true, nil + } + + return false, nil + }) +} + +func WaitForRouterInterfaceToDetach(client *gophercloud.ServiceClient, routerInterfaceID string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + r, err := ports.Get(client, routerInterfaceID).Extract() + if err != nil { + if _, ok := err.(gophercloud.ErrDefault404); ok { + return true, nil + } + + if errCode, ok := err.(gophercloud.ErrUnexpectedResponseCode); ok { + if errCode.Actual == 409 { + return false, nil + } + } + + return false, err + } + + if r.Status == "ACTIVE" { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/routers_test.go 
b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/routers_test.go new file mode 100644 index 000000000000..4be922e9f560 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3/routers_test.go @@ -0,0 +1,119 @@ +// +build acceptance networking layer3 router + +package layer3 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +func TestLayer3RouterList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + listOpts := routers.ListOpts{} + allPages, err := routers.List(client, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list routers: %v", err) + } + + allRouters, err := routers.ExtractRouters(allPages) + if err != nil { + t.Fatalf("Unable to extract routers: %v", err) + } + + for _, router := range allRouters { + tools.PrintResource(t, router) + } +} + +func TestLayer3ExternalRouterCreateDelete(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + router, err := CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer DeleteRouter(t, client, router.ID) + + tools.PrintResource(t, router) + + newName := tools.RandomString("TESTACC-", 8) + updateOpts := routers.UpdateOpts{ + Name: newName, + } + + _, err = routers.Update(client, router.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update router: %v", err) + } + + newRouter, err := routers.Get(client, router.ID).Extract() + if err != nil { + t.Fatalf("Unable to get router: %v", err) + } + + tools.PrintResource(t, newRouter) +} + +func TestLayer3RouterInterface(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a compute client: %v", err) + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatalf("Unable to get choices: %v", err) + } + + netid, err := networks.IDFromName(client, choices.NetworkName) + if err != nil { + t.Fatalf("Unable to find network id: %v", err) + } + + subnet, err := networking.CreateSubnet(t, client, netid) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, client, subnet.ID) + + tools.PrintResource(t, subnet) + + router, err := CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer DeleteRouter(t, client, router.ID) + + aiOpts := routers.AddInterfaceOpts{ + SubnetID: subnet.ID, + } + + iface, err := routers.AddInterface(client, router.ID, aiOpts).Extract() + if err != nil { + t.Fatalf("Failed to add interface to router: %v", err) + } + + tools.PrintResource(t, router) + tools.PrintResource(t, iface) + + riOpts := routers.RemoveInterfaceOpts{ + SubnetID: subnet.ID, + } + + _, err = routers.RemoveInterface(client, router.ID, riOpts).Extract() + if err != nil { + t.Fatalf("Failed to remove interface from router: %v", err) + } +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/lbaas.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/lbaas.go new file mode 100644 index 000000000000..b31d3e5b42d4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/lbaas.go @@ -0,0 +1,160 @@ +package lbaas + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" +) + +// CreateMember will create a load balancer member in a specified pool on a +// random port. An error will be returned if the member could not be created. +func CreateMember(t *testing.T, client *gophercloud.ServiceClient, poolID string) (*members.Member, error) { + protocolPort := tools.RandomInt(100, 1000) + address := tools.RandomInt(2, 200) + t.Logf("Attempting to create member in port %d", protocolPort) + + createOpts := members.CreateOpts{ + PoolID: poolID, + ProtocolPort: protocolPort, + Address: fmt.Sprintf("192.168.1.%d", address), + } + + member, err := members.Create(client, createOpts).Extract() + if err != nil { + return member, err + } + + t.Logf("Successfully created member %s", member.ID) + + return member, nil +} + +// CreateMonitor will create a monitor with a random name for a specific pool. +// An error will be returned if the monitor could not be created. +func CreateMonitor(t *testing.T, client *gophercloud.ServiceClient) (*monitors.Monitor, error) { + t.Logf("Attempting to create monitor.") + + createOpts := monitors.CreateOpts{ + Type: monitors.TypePING, + Delay: 90, + Timeout: 60, + MaxRetries: 10, + AdminStateUp: gophercloud.Enabled, + } + + monitor, err := monitors.Create(client, createOpts).Extract() + if err != nil { + return monitor, err + } + + t.Logf("Successfully created monitor %s", monitor.ID) + + return monitor, nil +} + +// CreatePool will create a pool with a random name. An error will be returned +// if the pool could not be deleted. +func CreatePool(t *testing.T, client *gophercloud.ServiceClient, subnetID string) (*pools.Pool, error) { + poolName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create pool %s", poolName) + + createOpts := pools.CreateOpts{ + Name: poolName, + SubnetID: subnetID, + Protocol: pools.ProtocolTCP, + LBMethod: pools.LBMethodRoundRobin, + } + + pool, err := pools.Create(client, createOpts).Extract() + if err != nil { + return pool, err + } + + t.Logf("Successfully created pool %s", poolName) + + return pool, nil +} + +// CreateVIP will create a vip with a random name and a random port in a +// specified subnet and pool. An error will be returned if the vip could +// not be created. 
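+//
+// A minimal usage sketch (subnet and pool are assumed to come from the
+// networking helpers and CreatePool above), mirroring vips_test.go:
+//
+//	vip, err := CreateVIP(t, client, subnet.ID, pool.ID)
+//	if err != nil {
+//		t.Fatalf("Unable to create vip: %v", err)
+//	}
+//	defer DeleteVIP(t, client, vip.ID)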
+func CreateVIP(t *testing.T, client *gophercloud.ServiceClient, subnetID, poolID string) (*vips.VirtualIP, error) { + vipName := tools.RandomString("TESTACCT-", 8) + vipPort := tools.RandomInt(100, 10000) + + t.Logf("Attempting to create VIP %s", vipName) + + createOpts := vips.CreateOpts{ + Name: vipName, + SubnetID: subnetID, + PoolID: poolID, + Protocol: "TCP", + ProtocolPort: vipPort, + } + + vip, err := vips.Create(client, createOpts).Extract() + if err != nil { + return vip, err + } + + t.Logf("Successfully created vip %s", vipName) + + return vip, nil +} + +// DeleteMember will delete a specified member. A fatal error will occur if +// the member could not be deleted. This works best when used as a deferred +// function. +func DeleteMember(t *testing.T, client *gophercloud.ServiceClient, memberID string) { + t.Logf("Attempting to delete member %s", memberID) + + if err := members.Delete(client, memberID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete member: %v", err) + } + + t.Logf("Successfully deleted member %s", memberID) +} + +// DeleteMonitor will delete a specified monitor. A fatal error will occur if +// the monitor could not be deleted. This works best when used as a deferred +// function. +func DeleteMonitor(t *testing.T, client *gophercloud.ServiceClient, monitorID string) { + t.Logf("Attempting to delete monitor %s", monitorID) + + if err := monitors.Delete(client, monitorID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete monitor: %v", err) + } + + t.Logf("Successfully deleted monitor %s", monitorID) +} + +// DeletePool will delete a specified pool. A fatal error will occur if the +// pool could not be deleted. This works best when used as a deferred function. +func DeletePool(t *testing.T, client *gophercloud.ServiceClient, poolID string) { + t.Logf("Attempting to delete pool %s", poolID) + + if err := pools.Delete(client, poolID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete pool: %v", err) + } + + t.Logf("Successfully deleted pool %s", poolID) +} + +// DeleteVIP will delete a specified vip. A fatal error will occur if the vip +// could not be deleted. This works best when used as a deferred function. 
+func DeleteVIP(t *testing.T, client *gophercloud.ServiceClient, vipID string) {
+	t.Logf("Attempting to delete vip %s", vipID)
+
+	if err := vips.Delete(client, vipID).ExtractErr(); err != nil {
+		t.Fatalf("Unable to delete vip: %v", err)
+	}
+
+	t.Logf("Successfully deleted vip %s", vipID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/members_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/members_test.go
new file mode 100644
index 000000000000..75dec83986d7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/members_test.go
@@ -0,0 +1,83 @@
+// +build acceptance networking lbaas member
+
+package lbaas
+
+import (
+	"testing"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/acceptance/clients"
+	networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members"
+)
+
+func TestMembersList(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	allPages, err := members.List(client, members.ListOpts{}).AllPages()
+	if err != nil {
+		t.Fatalf("Unable to list members: %v", err)
+	}
+
+	allMembers, err := members.ExtractMembers(allPages)
+	if err != nil {
+		t.Fatalf("Unable to extract members: %v", err)
+	}
+
+	for _, member := range allMembers {
+		tools.PrintResource(t, member)
+	}
+}
+
+func TestMembersCRUD(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	network, err := networking.CreateNetwork(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create network: %v", err)
+	}
+	defer networking.DeleteNetwork(t, client, network.ID)
+
+	subnet, err := networking.CreateSubnet(t, client, network.ID)
+	if err != nil {
+		t.Fatalf("Unable to create subnet: %v", err)
+	}
+	defer networking.DeleteSubnet(t, client, subnet.ID)
+
+	pool, err := CreatePool(t, client, subnet.ID)
+	if err != nil {
+		t.Fatalf("Unable to create pool: %v", err)
+	}
+	defer DeletePool(t, client, pool.ID)
+
+	member, err := CreateMember(t, client, pool.ID)
+	if err != nil {
+		t.Fatalf("Unable to create member: %v", err)
+	}
+	defer DeleteMember(t, client, member.ID)
+
+	tools.PrintResource(t, member)
+
+	updateOpts := members.UpdateOpts{
+		AdminStateUp: gophercloud.Enabled,
+	}
+
+	_, err = members.Update(client, member.ID, updateOpts).Extract()
+	if err != nil {
+		t.Fatalf("Unable to update member: %v", err)
+	}
+
+	newMember, err := members.Get(client, member.ID).Extract()
+	if err != nil {
+		t.Fatalf("Unable to get member: %v", err)
+	}
+
+	tools.PrintResource(t, newMember)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitors_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitors_test.go
new file mode 100644
index 000000000000..56b413afb0df
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/monitors_test.go
@@ -0,0 +1,63 @@
+// +build acceptance networking lbaas monitors
+
+package lbaas
+
+import (
+	"testing"
+
+	"github.com/gophercloud/gophercloud/acceptance/clients"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors"
+)
+
+func TestMonitorsList(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	allPages, err := monitors.List(client, monitors.ListOpts{}).AllPages()
+	if err != nil {
+		t.Fatalf("Unable to list monitors: %v", err)
+	}
+
+	allMonitors, err := monitors.ExtractMonitors(allPages)
+	if err != nil {
+		t.Fatalf("Unable to extract monitors: %v", err)
+	}
+
+	for _, monitor := range allMonitors {
+		tools.PrintResource(t, monitor)
+	}
+}
+
+func TestMonitorsCRUD(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	monitor, err := CreateMonitor(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create monitor: %v", err)
+	}
+	defer DeleteMonitor(t, client, monitor.ID)
+
+	tools.PrintResource(t, monitor)
+
+	updateOpts := monitors.UpdateOpts{
+		Delay: 999,
+	}
+
+	_, err = monitors.Update(client, monitor.ID, updateOpts).Extract()
+	if err != nil {
+		t.Fatalf("Unable to update monitor: %v", err)
+	}
+
+	newMonitor, err := monitors.Get(client, monitor.ID).Extract()
+	if err != nil {
+		t.Fatalf("Unable to get monitor: %v", err)
+	}
+
+	tools.PrintResource(t, newMonitor)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go
new file mode 100644
index 000000000000..f5a7df7b7515
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pkg.go
@@ -0,0 +1 @@
+package lbaas
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pools_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pools_test.go
new file mode 100644
index 000000000000..b53237c0e374
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/pools_test.go
@@ -0,0 +1,118 @@
+// +build acceptance networking lbaas pool
+
+package lbaas
+
+import (
+	"testing"
+
+	"github.com/gophercloud/gophercloud/acceptance/clients"
+	networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools"
+)
+
+func TestPoolsList(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	allPages, err := pools.List(client, pools.ListOpts{}).AllPages()
+	if err != nil {
+		t.Fatalf("Unable to list pools: %v", err)
+	}
+
+	allPools, err := pools.ExtractPools(allPages)
+	if err != nil {
+		t.Fatalf("Unable to extract pools: %v", err)
+	}
+
+	for _, pool := range allPools {
+		tools.PrintResource(t, pool)
+	}
+}
+
+func TestPoolsCRUD(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	network, err := networking.CreateNetwork(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create network: %v", err)
+	}
+	defer networking.DeleteNetwork(t, client, network.ID)
+
+	subnet, err := networking.CreateSubnet(t, client, network.ID)
+	if err != nil {
+		t.Fatalf("Unable to create subnet: %v", err)
+	}
+	defer networking.DeleteSubnet(t, client, subnet.ID)
+
+	pool, err := CreatePool(t, client, subnet.ID)
+	if err != nil {
+		t.Fatalf("Unable to create pool: %v", err)
+	}
+	defer DeletePool(t, client, pool.ID)
+
+	tools.PrintResource(t, pool)
+
+	updateOpts := pools.UpdateOpts{
+		LBMethod: pools.LBMethodLeastConnections,
+	}
+
+	_, err = pools.Update(client, pool.ID, updateOpts).Extract()
+	if err != nil {
+		t.Fatalf("Unable to update pool: %v", err)
+	}
+
+	newPool, err := pools.Get(client, pool.ID).Extract()
+	if err != nil {
+		t.Fatalf("Unable to get pool: %v", err)
+	}
+
+	tools.PrintResource(t, newPool)
+}
+
+func TestPoolsMonitors(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	network, err := networking.CreateNetwork(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create network: %v", err)
+	}
+	defer networking.DeleteNetwork(t, client, network.ID)
+
+	subnet, err := networking.CreateSubnet(t, client, network.ID)
+	if err != nil {
+		t.Fatalf("Unable to create subnet: %v", err)
+	}
+	defer networking.DeleteSubnet(t, client, subnet.ID)
+
+	pool, err := CreatePool(t, client, subnet.ID)
+	if err != nil {
+		t.Fatalf("Unable to create pool: %v", err)
+	}
+	defer DeletePool(t, client, pool.ID)
+
+	monitor, err := CreateMonitor(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create monitor: %v", err)
+	}
+	defer DeleteMonitor(t, client, monitor.ID)
+
+	t.Logf("Associating monitor %s with pool %s", monitor.ID, pool.ID)
+	if res := pools.AssociateMonitor(client, pool.ID, monitor.ID); res.Err != nil {
+		t.Fatalf("Unable to associate monitor to pool: %v", res.Err)
+	}
+
+	t.Logf("Disassociating monitor %s from pool %s", monitor.ID, pool.ID)
+	if res := pools.DisassociateMonitor(client, pool.ID, monitor.ID); res.Err != nil {
+		t.Fatalf("Unable to disassociate monitor from pool: %v", res.Err)
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vips_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vips_test.go
new file mode 100644
index 000000000000..a63dc63c6cee
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas/vips_test.go
@@ -0,0 +1,83 @@
+// +build acceptance networking lbaas vip
+
+package lbaas
+
+import (
+	"testing"
+
+	"github.com/gophercloud/gophercloud/acceptance/clients"
+	networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips"
+)
+
+func TestVIPsList(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	allPages, err := vips.List(client, vips.ListOpts{}).AllPages()
+	if err != nil {
+		t.Fatalf("Unable to list vips: %v", err)
+	}
+
+	allVIPs, err := vips.ExtractVIPs(allPages)
+	if err != nil {
+		t.Fatalf("Unable to extract vips: %v", err)
+	}
+
+	for _, vip := range allVIPs {
+		tools.PrintResource(t, vip)
+	}
+}
+
+func TestVIPsCRUD(t *testing.T) {
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	network, err := networking.CreateNetwork(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create network: %v", err)
+	}
+	defer networking.DeleteNetwork(t, client, network.ID)
+
+	subnet, err := networking.CreateSubnet(t, client, network.ID)
+	if err != nil {
+		t.Fatalf("Unable to create subnet: %v", err)
+	}
+	defer networking.DeleteSubnet(t, client, subnet.ID)
+
+	pool, err := CreatePool(t, client, subnet.ID)
+	if err != nil {
+		t.Fatalf("Unable to create pool: %v", err)
+	}
+	defer DeletePool(t, client, pool.ID)
+
+	vip, err := CreateVIP(t, client, subnet.ID, pool.ID)
+	if err != nil {
+		t.Fatalf("Unable to create vip: %v", err)
+	}
+	defer DeleteVIP(t, client, vip.ID)
+
+	tools.PrintResource(t, vip)
+
+	connLimit := 100
+	updateOpts := vips.UpdateOpts{
+		ConnLimit: &connLimit,
+	}
+
+	_, err = vips.Update(client, vip.ID, updateOpts).Extract()
+	if err != nil {
+		t.Fatalf("Unable to update vip: %v", err)
+	}
+
+	newVIP, err := vips.Get(client, vip.ID).Extract()
+	if err != nil {
+		t.Fatalf("Unable to get vip: %v", err)
+	}
+
+	tools.PrintResource(t, newVIP)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/lbaas_v2.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/lbaas_v2.go
new file mode 100644
index 000000000000..093f835b9b26
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/lbaas_v2.go
@@ -0,0 +1,282 @@
+package lbaas_v2
+
+import (
+	"fmt"
+	"strings"
+	"testing"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
+)
+
+const loadbalancerActiveTimeoutSeconds = 300
+const loadbalancerDeleteTimeoutSeconds = 300
+
+// CreateListener will create a listener for a given load balancer on a random
+// port with a random name. An error will be returned if the listener could not
+// be created.
+func CreateListener(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer) (*listeners.Listener, error) {
+	listenerName := tools.RandomString("TESTACCT-", 8)
+	listenerPort := tools.RandomInt(1, 100)
+
+	t.Logf("Attempting to create listener %s on port %d", listenerName, listenerPort)
+
+	createOpts := listeners.CreateOpts{
+		Name:           listenerName,
+		LoadbalancerID: lb.ID,
+		Protocol:       "TCP",
+		ProtocolPort:   listenerPort,
+	}
+
+	listener, err := listeners.Create(client, createOpts).Extract()
+	if err != nil {
+		return listener, err
+	}
+
+	t.Logf("Successfully created listener %s", listenerName)
+
+	if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil {
+		return listener, fmt.Errorf("Timed out waiting for loadbalancer to become active")
+	}
+
+	return listener, nil
+}
+
+// CreateLoadBalancer will create a load balancer with a random name on a given
+// subnet. An error will be returned if the loadbalancer could not be created.
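+//
+// A minimal usage sketch (subnet is assumed to come from the networking
+// helpers), mirroring loadbalancers_test.go:
+//
+//	lb, err := CreateLoadBalancer(t, client, subnet.ID)
+//	if err != nil {
+//		t.Fatalf("Unable to create loadbalancer: %v", err)
+//	}
+//	defer DeleteLoadBalancer(t, client, lb.ID)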
+func CreateLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, subnetID string) (*loadbalancers.LoadBalancer, error) { + lbName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create loadbalancer %s on subnet %s", lbName, subnetID) + + createOpts := loadbalancers.CreateOpts{ + Name: lbName, + VipSubnetID: subnetID, + AdminStateUp: gophercloud.Enabled, + } + + lb, err := loadbalancers.Create(client, createOpts).Extract() + if err != nil { + return lb, err + } + + t.Logf("Successfully created loadbalancer %s on subnet %s", lbName, subnetID) + t.Logf("Waiting for loadbalancer %s to become active", lbName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return lb, err + } + + t.Logf("LoadBalancer %s is active", lbName) + + return lb, nil +} + +// CreateMember will create a member with a random name, port, address, and +// weight. An error will be returned if the member could not be created. +func CreateMember(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer, pool *pools.Pool, subnetID, subnetCIDR string) (*pools.Member, error) { + memberName := tools.RandomString("TESTACCT-", 8) + memberPort := tools.RandomInt(100, 1000) + memberWeight := tools.RandomInt(1, 10) + + cidrParts := strings.Split(subnetCIDR, "/") + subnetParts := strings.Split(cidrParts[0], ".") + memberAddress := fmt.Sprintf("%s.%s.%s.%d", subnetParts[0], subnetParts[1], subnetParts[2], tools.RandomInt(10, 100)) + + t.Logf("Attempting to create member %s", memberName) + + createOpts := pools.CreateMemberOpts{ + Name: memberName, + ProtocolPort: memberPort, + Weight: memberWeight, + Address: memberAddress, + SubnetID: subnetID, + } + + t.Logf("Member create opts: %#v", createOpts) + + member, err := pools.CreateMember(client, pool.ID, createOpts).Extract() + if err != nil { + return member, err + } + + t.Logf("Successfully created member %s", memberName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return member, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return member, nil +} + +// CreateMonitor will create a monitor with a random name for a specific pool. +// An error will be returned if the monitor could not be created. +func CreateMonitor(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer, pool *pools.Pool) (*monitors.Monitor, error) { + monitorName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create monitor %s", monitorName) + + createOpts := monitors.CreateOpts{ + PoolID: pool.ID, + Name: monitorName, + Delay: 10, + Timeout: 5, + MaxRetries: 5, + Type: "PING", + } + + monitor, err := monitors.Create(client, createOpts).Extract() + if err != nil { + return monitor, err + } + + t.Logf("Successfully created monitor: %s", monitorName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return monitor, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return monitor, nil +} + +// CreatePool will create a pool with a random name with a specified listener +// and loadbalancer. An error will be returned if the pool could not be +// created. 
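+//
+// A minimal usage sketch (lb is assumed to come from CreateLoadBalancer),
+// mirroring loadbalancers_test.go:
+//
+//	pool, err := CreatePool(t, client, lb)
+//	if err != nil {
+//		t.Fatalf("Unable to create pool: %v", err)
+//	}
+//	defer DeletePool(t, client, lb.ID, pool.ID)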
+func CreatePool(t *testing.T, client *gophercloud.ServiceClient, lb *loadbalancers.LoadBalancer) (*pools.Pool, error) { + poolName := tools.RandomString("TESTACCT-", 8) + + t.Logf("Attempting to create pool %s", poolName) + + createOpts := pools.CreateOpts{ + Name: poolName, + Protocol: pools.ProtocolTCP, + LoadbalancerID: lb.ID, + LBMethod: pools.LBMethodLeastConnections, + } + + pool, err := pools.Create(client, createOpts).Extract() + if err != nil { + return pool, err + } + + t.Logf("Successfully created pool %s", poolName) + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + return pool, fmt.Errorf("Timed out waiting for loadbalancer to become active") + } + + return pool, nil +} + +// DeleteListener will delete a specified listener. A fatal error will occur if +// the listener could not be deleted. This works best when used as a deferred +// function. +func DeleteListener(t *testing.T, client *gophercloud.ServiceClient, lbID, listenerID string) { + t.Logf("Attempting to delete listener %s", listenerID) + + if err := listeners.Delete(client, listenerID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete listener: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted listener %s", listenerID) +} + +// DeleteMember will delete a specified member. A fatal error will occur if the +// member could not be deleted. This works best when used as a deferred +// function. +func DeleteMember(t *testing.T, client *gophercloud.ServiceClient, lbID, poolID, memberID string) { + t.Logf("Attempting to delete member %s", memberID) + + if err := pools.DeleteMember(client, poolID, memberID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete member: %s", memberID) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted member %s", memberID) +} + +// DeleteLoadBalancer will delete a specified loadbalancer. A fatal error will +// occur if the loadbalancer could not be deleted. This works best when used +// as a deferred function. +func DeleteLoadBalancer(t *testing.T, client *gophercloud.ServiceClient, lbID string) { + t.Logf("Attempting to delete loadbalancer %s", lbID) + + if err := loadbalancers.Delete(client, lbID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete loadbalancer: %v", err) + } + + t.Logf("Waiting for loadbalancer %s to delete", lbID) + + if err := WaitForLoadBalancerState(client, lbID, "DELETED", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Loadbalancer did not delete in time.") + } + + t.Logf("Successfully deleted loadbalancer %s", lbID) +} + +// DeleteMonitor will delete a specified monitor. A fatal error will occur if +// the monitor could not be deleted. This works best when used as a deferred +// function. 
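+//
+// A minimal sketch of the intended deferred use (lb and pool are assumed to
+// come from CreateLoadBalancer and CreatePool), mirroring loadbalancers_test.go:
+//
+//	monitor, err := CreateMonitor(t, client, lb, pool)
+//	if err != nil {
+//		t.Fatalf("Unable to create monitor: %v", err)
+//	}
+//	defer DeleteMonitor(t, client, lb.ID, monitor.ID)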
+func DeleteMonitor(t *testing.T, client *gophercloud.ServiceClient, lbID, monitorID string) { + t.Logf("Attempting to delete monitor %s", monitorID) + + if err := monitors.Delete(client, monitorID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete monitor: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted monitor %s", monitorID) +} + +// DeletePool will delete a specified pool. A fatal error will occur if the +// pool could not be deleted. This works best when used as a deferred function. +func DeletePool(t *testing.T, client *gophercloud.ServiceClient, lbID, poolID string) { + t.Logf("Attempting to delete pool %s", poolID) + + if err := pools.Delete(client, poolID).ExtractErr(); err != nil { + t.Fatalf("Unable to delete pool: %v", err) + } + + if err := WaitForLoadBalancerState(client, lbID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + t.Logf("Successfully deleted pool %s", poolID) +} + +// WaitForLoadBalancerState will wait until a loadbalancer reaches a given state. +func WaitForLoadBalancerState(client *gophercloud.ServiceClient, lbID, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := loadbalancers.Get(client, lbID).Extract() + if err != nil { + if httpStatus, ok := err.(gophercloud.ErrDefault404); ok { + if httpStatus.Actual == 404 { + if status == "DELETED" { + return true, nil + } + } + } + return false, err + } + + if current.ProvisioningStatus == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/listeners_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/listeners_test.go new file mode 100644 index 000000000000..2d2dd036954e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/listeners_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking lbaas_v2 listeners + +package lbaas_v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" +) + +func TestListenersList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := listeners.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list listeners: %v", err) + } + + allListeners, err := listeners.ExtractListeners(allPages) + if err != nil { + t.Fatalf("Unable to extract listeners: %v", err) + } + + for _, listener := range allListeners { + tools.PrintResource(t, listener) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/loadbalancers_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/loadbalancers_test.go new file mode 100644 index 000000000000..650eb2cc49d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/loadbalancers_test.go @@ -0,0 +1,178 @@ +// +build acceptance networking lbaas_v2 
loadbalancers + +package lbaas_v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func TestLoadbalancersList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := loadbalancers.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list loadbalancers: %v", err) + } + + allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages) + if err != nil { + t.Fatalf("Unable to extract loadbalancers: %v", err) + } + + for _, lb := range allLoadbalancers { + tools.PrintResource(t, lb) + } +} + +func TestLoadbalancersCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + network, err := networking.CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer networking.DeleteNetwork(t, client, network.ID) + + subnet, err := networking.CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, client, subnet.ID) + + lb, err := CreateLoadBalancer(t, client, subnet.ID) + if err != nil { + t.Fatalf("Unable to create loadbalancer: %v", err) + } + defer DeleteLoadBalancer(t, client, lb.ID) + + newLB, err := loadbalancers.Get(client, lb.ID).Extract() + if err != nil { + t.Fatalf("Unable to get loadbalancer: %v", err) + } + + tools.PrintResource(t, newLB) + + // Because of the time it takes to create a loadbalancer, + // this test will include some other resources. 
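+	//
+	// Each sub-resource below follows the same cycle: create it, update it,
+	// wait for the parent loadbalancer to return to ACTIVE (an update
+	// typically leaves it in a pending state for a short time), then get
+	// and print the result.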
+ + // Listener + listener, err := CreateListener(t, client, lb) + if err != nil { + t.Fatalf("Unable to create listener: %v", err) + } + defer DeleteListener(t, client, lb.ID, listener.ID) + + updateListenerOpts := listeners.UpdateOpts{ + Description: "Some listener description", + } + _, err = listeners.Update(client, listener.ID, updateListenerOpts).Extract() + if err != nil { + t.Fatalf("Unable to update listener") + } + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newListener, err := listeners.Get(client, listener.ID).Extract() + if err != nil { + t.Fatalf("Unable to get listener") + } + + tools.PrintResource(t, newListener) + + // Pool + pool, err := CreatePool(t, client, lb) + if err != nil { + t.Fatalf("Unable to create pool: %v", err) + } + defer DeletePool(t, client, lb.ID, pool.ID) + + updatePoolOpts := pools.UpdateOpts{ + Description: "Some pool description", + } + _, err = pools.Update(client, pool.ID, updatePoolOpts).Extract() + if err != nil { + t.Fatalf("Unable to update pool") + } + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newPool, err := pools.Get(client, pool.ID).Extract() + if err != nil { + t.Fatalf("Unable to get pool") + } + + tools.PrintResource(t, newPool) + + // Member + member, err := CreateMember(t, client, lb, newPool, subnet.ID, subnet.CIDR) + if err != nil { + t.Fatalf("Unable to create member: %v", err) + } + defer DeleteMember(t, client, lb.ID, pool.ID, member.ID) + + newWeight := tools.RandomInt(11, 100) + updateMemberOpts := pools.UpdateMemberOpts{ + Weight: newWeight, + } + _, err = pools.UpdateMember(client, pool.ID, member.ID, updateMemberOpts).Extract() + if err != nil { + t.Fatalf("Unable to update pool") + } + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newMember, err := pools.GetMember(client, pool.ID, member.ID).Extract() + if err != nil { + t.Fatalf("Unable to get member") + } + + tools.PrintResource(t, newMember) + + // Monitor + monitor, err := CreateMonitor(t, client, lb, newPool) + if err != nil { + t.Fatalf("Unable to create monitor: %v", err) + } + defer DeleteMonitor(t, client, lb.ID, monitor.ID) + + newDelay := tools.RandomInt(20, 30) + updateMonitorOpts := monitors.UpdateOpts{ + Delay: newDelay, + } + _, err = monitors.Update(client, monitor.ID, updateMonitorOpts).Extract() + if err != nil { + t.Fatalf("Unable to update monitor") + } + + if err := WaitForLoadBalancerState(client, lb.ID, "ACTIVE", loadbalancerActiveTimeoutSeconds); err != nil { + t.Fatalf("Timed out waiting for loadbalancer to become active") + } + + newMonitor, err := monitors.Get(client, monitor.ID).Extract() + if err != nil { + t.Fatalf("Unable to get monitor") + } + + tools.PrintResource(t, newMonitor) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/monitors_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/monitors_test.go new file mode 100644 index 000000000000..b312370722db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/monitors_test.go @@ -0,0 +1,32 @@ +// 
+build acceptance networking lbaas_v2 monitors + +package lbaas_v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" +) + +func TestMonitorsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := monitors.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list monitors: %v", err) + } + + allMonitors, err := monitors.ExtractMonitors(allPages) + if err != nil { + t.Fatalf("Unable to extract monitors: %v", err) + } + + for _, monitor := range allMonitors { + tools.PrintResource(t, monitor) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/pkg.go new file mode 100644 index 000000000000..24b7482a56cf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/pkg.go @@ -0,0 +1 @@ +package lbaas_v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/pools_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/pools_test.go new file mode 100644 index 000000000000..b4f55a0f63f5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/lbaas_v2/pools_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking lbaas_v2 pools + +package lbaas_v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" +) + +func TestPoolsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := pools.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list pools: %v", err) + } + + allPools, err := pools.ExtractPools(allPages) + if err != nil { + t.Fatalf("Unable to extract pools: %v", err) + } + + for _, pool := range allPools { + tools.PrintResource(t, pool) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/networkipavailabilities/networkipavailabilities_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/networkipavailabilities/networkipavailabilities_test.go new file mode 100644 index 000000000000..f141d204a874 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/networkipavailabilities/networkipavailabilities_test.go @@ -0,0 +1,32 @@ +// +build acceptance networking networkipavailabilities + +package networkipavailabilities + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities" +) + +func TestNetworkIPAvailabilityList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := 
networkipavailabilities.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list network IP availabilities: %v", err) + } + + allAvailabilities, err := networkipavailabilities.ExtractNetworkIPAvailabilities(allPages) + if err != nil { + t.Fatalf("Unable to extract network IP availabilities: %v", err) + } + + for _, availability := range allAvailabilities { + tools.PrintResource(t, availability) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/networkipavailabilities/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/networkipavailabilities/pkg.go new file mode 100644 index 000000000000..7399a405beed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/networkipavailabilities/pkg.go @@ -0,0 +1 @@ +package networkipavailabilities diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go new file mode 100644 index 000000000000..aeec0fa756e7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/pkg.go @@ -0,0 +1 @@ +package extensions diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/pkg.go new file mode 100644 index 000000000000..5dae1b16605e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/pkg.go @@ -0,0 +1 @@ +package portsbinding diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding.go new file mode 100644 index 000000000000..5b7dc90787e6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding.go @@ -0,0 +1,48 @@ +package portsbinding + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// PortWithBindingExt represents a port with the binding fields +type PortWithBindingExt struct { + ports.Port + portsbinding.PortsBindingExt +} + +// CreatePortsbinding will create a port on the specified subnet. An error will be +// returned if the port could not be created. 
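+//
+// A minimal usage sketch (network, subnet, and the networking helper package
+// are assumed, as in portsbinding_test.go):
+//
+//	port, err := CreatePortsbinding(t, client, network.ID, subnet.ID, "localhost")
+//	if err != nil {
+//		t.Fatalf("Unable to create port: %v", err)
+//	}
+//	defer networking.DeletePort(t, client, port.ID)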
+func CreatePortsbinding(t *testing.T, client *gophercloud.ServiceClient, networkID, subnetID, hostID string) (PortWithBindingExt, error) { + portName := tools.RandomString("TESTACC-", 8) + iFalse := false + + t.Logf("Attempting to create port: %s", portName) + + portCreateOpts := ports.CreateOpts{ + NetworkID: networkID, + Name: portName, + AdminStateUp: &iFalse, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + } + + createOpts := portsbinding.CreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + HostID: hostID, + } + + var s PortWithBindingExt + + err := ports.Create(client, createOpts).ExtractInto(&s) + if err != nil { + return s, err + } + + t.Logf("Successfully created port: %s", portName) + + return s, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding_test.go new file mode 100644 index 000000000000..6ce205925acf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/portsbinding/portsbinding_test.go @@ -0,0 +1,57 @@ +// +build acceptance networking + +package portsbinding + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func TestPortsbindingCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := networking.CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer networking.DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := networking.CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, client, subnet.ID) + + // Define a host + hostID := "localhost" + + // Create port + port, err := CreatePortsbinding(t, client, network.ID, subnet.ID, hostID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer networking.DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + // Update port + newPortName := tools.RandomString("TESTACC-", 8) + updateOpts := ports.UpdateOpts{ + Name: newPortName, + } + newPort, err := ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go new file mode 100644 index 000000000000..44726ce27e45 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/provider_test.go @@ -0,0 +1,34 @@ +// +build acceptance networking provider + +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +func TestNetworksProviderCRUD(t 
*testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create a network + network, err := networking.CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer networking.DeleteNetwork(t, client, network.ID) + + getResult := networks.Get(client, network.ID) + newNetwork, err := getResult.Extract() + if err != nil { + t.Fatalf("Unable to extract network: %v", err) + } + + tools.PrintResource(t, newNetwork) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/qos/ruletypes/ruletypes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/qos/ruletypes/ruletypes_test.go new file mode 100644 index 000000000000..e14844b56aea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/qos/ruletypes/ruletypes_test.go @@ -0,0 +1,31 @@ +package ruletypes + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes" +) + +func TestListRuleTypes(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + return + } + + page, err := ruletypes.ListRuleTypes(client).AllPages() + if err != nil { + t.Fatalf("Failed to list rule types pages: %v", err) + return + } + + ruleTypes, err := ruletypes.ExtractRuleTypes(page) + if err != nil { + t.Fatalf("Failed to list rule types: %v", err) + return + } + + tools.PrintResource(t, ruleTypes) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/pkg.go new file mode 100644 index 000000000000..f682aeab0642 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/pkg.go @@ -0,0 +1 @@ +package rbacpolicies diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/rbacpolicies.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/rbacpolicies.go new file mode 100644 index 000000000000..46903dff78fe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/rbacpolicies.go @@ -0,0 +1,43 @@ +package rbacpolicies + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies" +) + +// CreateRBACPolicy will create a rbac-policy. An error will be returned if the +// rbac-policy could not be created. 
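+//
+// A minimal usage sketch (project and network are assumed to come from the
+// identity and networking helpers), mirroring rbacpolicies_test.go:
+//
+//	rbacPolicy, err := CreateRBACPolicy(t, client, project.ID, network.ID)
+//	if err != nil {
+//		t.Fatalf("Unable to create rbac-policy: %v", err)
+//	}
+//	defer DeleteRBACPolicy(t, client, rbacPolicy.ID)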
+func CreateRBACPolicy(t *testing.T, client *gophercloud.ServiceClient, tenantID, networkID string) (*rbacpolicies.RBACPolicy, error) {
+	createOpts := rbacpolicies.CreateOpts{
+		Action:       rbacpolicies.ActionAccessShared,
+		ObjectType:   "network",
+		TargetTenant: tenantID,
+		ObjectID:     networkID,
+	}
+
+	t.Logf("Trying to create rbac_policy")
+
+	rbacPolicy, err := rbacpolicies.Create(client, createOpts).Extract()
+	if err != nil {
+		return rbacPolicy, err
+	}
+
+	t.Logf("Successfully created rbac_policy")
+	return rbacPolicy, nil
+}
+
+// DeleteRBACPolicy will delete a rbac-policy with a specified ID. A fatal error will
+// occur if the delete was not successful. This works best when used as a
+// deferred function.
+func DeleteRBACPolicy(t *testing.T, client *gophercloud.ServiceClient, rbacPolicyID string) {
+	t.Logf("Trying to delete rbac_policy: %s", rbacPolicyID)
+
+	err := rbacpolicies.Delete(client, rbacPolicyID).ExtractErr()
+	if err != nil {
+		t.Fatalf("Unable to delete rbac_policy %s: %v", rbacPolicyID, err)
+	}
+
+	t.Logf("Deleted rbac_policy: %s", rbacPolicyID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/rbacpolicies_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/rbacpolicies_test.go
new file mode 100644
index 000000000000..db838d2816a6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/rbacpolicies/rbacpolicies_test.go
@@ -0,0 +1,113 @@
+// +build acceptance
+
+package rbacpolicies
+
+import (
+	"os"
+	"testing"
+
+	"github.com/gophercloud/gophercloud/acceptance/clients"
+	projects "github.com/gophercloud/gophercloud/acceptance/openstack/identity/v3"
+	networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2"
+	"github.com/gophercloud/gophercloud/acceptance/tools"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies"
+)
+
+func TestRBACPolicyCRUD(t *testing.T) {
+	username := os.Getenv("OS_USERNAME")
+	if username != "admin" {
+		t.Skip("must be admin to run this test")
+	}
+
+	client, err := clients.NewNetworkV2Client()
+	if err != nil {
+		t.Fatalf("Unable to create a network client: %v", err)
+	}
+
+	// Create a network
+	network, err := networking.CreateNetwork(t, client)
+	if err != nil {
+		t.Fatalf("Unable to create network: %v", err)
+	}
+	defer networking.DeleteNetwork(t, client, network.ID)
+
+	tools.PrintResource(t, network)
+
+	identityClient, err := clients.NewIdentityV3Client()
+	if err != nil {
+		t.Fatalf("Unable to obtain an identity client: %v", err)
+	}
+
+	// Create a project/tenant
+	project, err := projects.CreateProject(t, identityClient, nil)
+	if err != nil {
+		t.Fatalf("Unable to create project: %v", err)
+	}
+	defer projects.DeleteProject(t, identityClient, project.ID)
+
+	tools.PrintResource(t, project)
+
+	// Create a rbac-policy
+	rbacPolicy, err := CreateRBACPolicy(t, client, project.ID, network.ID)
+	if err != nil {
+		t.Fatalf("Unable to create rbac-policy: %v", err)
+	}
+	defer DeleteRBACPolicy(t, client, rbacPolicy.ID)
+
+	tools.PrintResource(t, rbacPolicy)
+
+	// Create another project/tenant for rbac-update
+	project2, err := projects.CreateProject(t, identityClient, nil)
+	if err != nil {
+		t.Fatalf("Unable to create project2: %v", err)
+	}
+	defer projects.DeleteProject(t, identityClient, project2.ID)
+
+	tools.PrintResource(t, project2)
+
+	// Update a rbac-policy
+	updateOpts := rbacpolicies.UpdateOpts{
+ TargetTenant: project2.ID, + } + + _, err = rbacpolicies.Update(client, rbacPolicy.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update rbac-policy: %v", err) + } + + // Get the rbac-policy by ID + t.Logf("Get rbac_policy by ID") + newrbacPolicy, err := rbacpolicies.Get(client, rbacPolicy.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve rbac policy: %v", err) + } + + tools.PrintResource(t, newrbacPolicy) +} + +func TestRBACPolicyList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + type rbacPolicy struct { + rbacpolicies.RBACPolicy + } + + var allRBACPolicies []rbacPolicy + + allPages, err := rbacpolicies.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list rbac policies: %v", err) + } + + err = rbacpolicies.ExtractRBACPolicesInto(allPages, &allRBACPolicies) + if err != nil { + t.Fatalf("Unable to extract rbac policies: %v", err) + } + + for _, rbacpolicy := range allRBACPolicies { + tools.PrintResource(t, rbacpolicy) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go new file mode 100644 index 000000000000..3810a4201413 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/security_test.go @@ -0,0 +1,105 @@ +// +build acceptance networking security + +package extensions + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networking "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" +) + +func TestSecurityGroupsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + listOpts := groups.ListOpts{} + allPages, err := groups.List(client, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list groups: %v", err) + } + + allGroups, err := groups.ExtractGroups(allPages) + if err != nil { + t.Fatalf("Unable to extract groups: %v", err) + } + + for _, group := range allGroups { + tools.PrintResource(t, group) + } +} + +func TestSecurityGroupsCreateUpdateDelete(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + group, err := CreateSecurityGroup(t, client) + if err != nil { + t.Fatalf("Unable to create security group: %v", err) + } + defer DeleteSecurityGroup(t, client, group.ID) + + rule, err := CreateSecurityGroupRule(t, client, group.ID) + if err != nil { + t.Fatalf("Unable to create security group rule: %v", err) + } + defer DeleteSecurityGroupRule(t, client, rule.ID) + + tools.PrintResource(t, group) + + updateOpts := groups.UpdateOpts{ + Description: "A security group", + } + + newGroup, err := groups.Update(client, group.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update security group: %v", err) + } + + tools.PrintResource(t, newGroup) +} + +func TestSecurityGroupsPort(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + network, err := networking.CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable 
to create network: %v", err) + } + defer networking.DeleteNetwork(t, client, network.ID) + + subnet, err := networking.CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networking.DeleteSubnet(t, client, subnet.ID) + + group, err := CreateSecurityGroup(t, client) + if err != nil { + t.Fatalf("Unable to create security group: %v", err) + } + defer DeleteSecurityGroup(t, client, group.ID) + + rule, err := CreateSecurityGroupRule(t, client, group.ID) + if err != nil { + t.Fatalf("Unable to create security group rule: %v", err) + } + defer DeleteSecurityGroupRule(t, client, rule.ID) + + port, err := CreatePortWithSecurityGroup(t, client, network.ID, subnet.ID, group.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer networking.DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools/subnetpools.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools/subnetpools.go new file mode 100644 index 000000000000..fdf6318d52f9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools/subnetpools.go @@ -0,0 +1,45 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools" +) + +// CreateSubnetPool will create a subnetpool. An error will be returned if the +// subnetpool could not be created. +func CreateSubnetPool(t *testing.T, client *gophercloud.ServiceClient) (*subnetpools.SubnetPool, error) { + subnetPoolName := tools.RandomString("TESTACC-", 8) + subnetPoolPrefixes := []string{ + "10.0.0.0/8", + } + createOpts := subnetpools.CreateOpts{ + Name: subnetPoolName, + Prefixes: subnetPoolPrefixes, + } + + t.Logf("Attempting to create a subnetpool: %s", subnetPoolName) + + subnetPool, err := subnetpools.Create(client, createOpts).Extract() + if err != nil { + return nil, err + } + + t.Logf("Successfully created the subnetpool.") + return subnetPool, nil +} + +// DeleteSubnetPool will delete a subnetpool with a specified ID. +// A fatal error will occur if the delete was not successful. 
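+//
+// A minimal sketch of the intended deferred use, mirroring subnetpools_test.go:
+//
+//	subnetPool, err := CreateSubnetPool(t, client)
+//	if err != nil {
+//		t.Fatalf("Unable to create a subnetpool: %v", err)
+//	}
+//	defer DeleteSubnetPool(t, client, subnetPool.ID)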
+func DeleteSubnetPool(t *testing.T, client *gophercloud.ServiceClient, subnetPoolID string) { + t.Logf("Attempting to delete the subnetpool: %s", subnetPoolID) + + err := subnetpools.Delete(client, subnetPoolID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete subnetpool %s: %v", subnetPoolID, err) + } + + t.Logf("Deleted subnetpool: %s", subnetPoolID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools/subnetpools_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools/subnetpools_test.go new file mode 100644 index 000000000000..bdef6e67942d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools/subnetpools_test.go @@ -0,0 +1,65 @@ +// +build acceptance networking subnetpools + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools" +) + +func TestSubnetPoolsCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create a subnetpool + subnetPool, err := CreateSubnetPool(t, client) + if err != nil { + t.Fatalf("Unable to create a subnetpool: %v", err) + } + defer DeleteSubnetPool(t, client, subnetPool.ID) + + tools.PrintResource(t, subnetPool) + + newName := tools.RandomString("TESTACC-", 8) + updateOpts := &subnetpools.UpdateOpts{ + Name: newName, + } + + _, err = subnetpools.Update(client, subnetPool.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update the subnetpool: %v", err) + } + + newSubnetPool, err := subnetpools.Get(client, subnetPool.ID).Extract() + if err != nil { + t.Fatalf("Unable to get subnetpool: %v", err) + } + + tools.PrintResource(t, newSubnetPool) +} + +func TestSubnetPoolsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := subnetpools.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list subnetpools: %v", err) + } + + allSubnetPools, err := subnetpools.ExtractSubnetPools(allPages) + if err != nil { + t.Fatalf("Unable to extract subnetpools: %v", err) + } + + for _, subnetpool := range allSubnetPools { + tools.PrintResource(t, subnetpool) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/group_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/group_test.go new file mode 100644 index 000000000000..853065d2190e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/group_test.go @@ -0,0 +1,65 @@ +// +build acceptance networking vpnaas + +package vpnaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups" +) + +func TestGroupList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := endpointgroups.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list endpoint groups: %v", 
err) + } + + allGroups, err := endpointgroups.ExtractEndpointGroups(allPages) + if err != nil { + t.Fatalf("Unable to extract endpoint groups: %v", err) + } + + for _, group := range allGroups { + tools.PrintResource(t, group) + } +} + +func TestGroupCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + group, err := CreateEndpointGroup(t, client) + if err != nil { + t.Fatalf("Unable to create Endpoint group: %v", err) + } + defer DeleteEndpointGroup(t, client, group.ID) + tools.PrintResource(t, group) + + newGroup, err := endpointgroups.Get(client, group.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve Endpoint group: %v", err) + } + tools.PrintResource(t, newGroup) + + updatedName := "updatedname" + updatedDescription := "updated description" + updateOpts := endpointgroups.UpdateOpts{ + Name: &updatedName, + Description: &updatedDescription, + } + updatedGroup, err := endpointgroups.Update(client, group.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update endpoint group: %v", err) + } + tools.PrintResource(t, updatedGroup) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/ikepolicy_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/ikepolicy_test.go new file mode 100644 index 000000000000..2efa1e1b65f8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/ikepolicy_test.go @@ -0,0 +1,69 @@ +// +build acceptance networking vpnaas + +package vpnaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies" +) + +func TestIKEPolicyList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := ikepolicies.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list IKE policies: %v", err) + } + + allPolicies, err := ikepolicies.ExtractPolicies(allPages) + if err != nil { + t.Fatalf("Unable to extract IKE policies: %v", err) + } + + for _, policy := range allPolicies { + tools.PrintResource(t, policy) + } +} + +func TestIKEPolicyCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + policy, err := CreateIKEPolicy(t, client) + if err != nil { + t.Fatalf("Unable to create IKE policy: %v", err) + } + defer DeleteIKEPolicy(t, client, policy.ID) + + tools.PrintResource(t, policy) + + newPolicy, err := ikepolicies.Get(client, policy.ID).Extract() + if err != nil { + t.Fatalf("Unable to get IKE policy: %v", err) + } + tools.PrintResource(t, newPolicy) + + updatedName := "updatedname" + updatedDescription := "updated policy" + updateOpts := ikepolicies.UpdateOpts{ + Name: &updatedName, + Description: &updatedDescription, + Lifetime: &ikepolicies.LifetimeUpdateOpts{ + Value: 7000, + }, + } + updatedPolicy, err := ikepolicies.Update(client, policy.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update IKE policy: %v", err) + } + tools.PrintResource(t, updatedPolicy) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/ipsecpolicy_test.go 
b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/ipsecpolicy_test.go new file mode 100644 index 000000000000..2fdee7dd92cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/ipsecpolicy_test.go @@ -0,0 +1,63 @@ +// +build acceptance networking vpnaas + +package vpnaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies" +) + +func TestIPSecPolicyList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := ipsecpolicies.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list IPSec policies: %v", err) + } + + allPolicies, err := ipsecpolicies.ExtractPolicies(allPages) + if err != nil { + t.Fatalf("Unable to extract policies: %v", err) + } + + for _, policy := range allPolicies { + tools.PrintResource(t, policy) + } +} + +func TestIPSecPolicyCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + policy, err := CreateIPSecPolicy(t, client) + if err != nil { + t.Fatalf("Unable to create IPSec policy: %v", err) + } + defer DeleteIPSecPolicy(t, client, policy.ID) + tools.PrintResource(t, policy) + + updatedDescription := "Updated policy description" + updateOpts := ipsecpolicies.UpdateOpts{ + Description: &updatedDescription, + } + + policy, err = ipsecpolicies.Update(client, policy.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update IPSec policy: %v", err) + } + tools.PrintResource(t, policy) + + newPolicy, err := ipsecpolicies.Get(client, policy.ID).Extract() + if err != nil { + t.Fatalf("Unable to get IPSec policy: %v", err) + } + tools.PrintResource(t, newPolicy) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/service_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/service_test.go new file mode 100644 index 000000000000..f88aa7611dfa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/service_test.go @@ -0,0 +1,60 @@ +// +build acceptance networking vpnaas + +package vpnaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + layer3 "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services" +) + +func TestServiceList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := services.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list services: %v", err) + } + + allServices, err := services.ExtractServices(allPages) + if err != nil { + t.Fatalf("Unable to extract services: %v", err) + } + + for _, service := range allServices { + tools.PrintResource(t, service) + } +} + +func TestServiceCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + router, err :=
layer3.CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer layer3.DeleteRouter(t, client, router.ID) + + service, err := CreateService(t, client, router.ID) + if err != nil { + t.Fatalf("Unable to create service: %v", err) + } + defer DeleteService(t, client, service.ID) + + newService, err := services.Get(client, service.ID).Extract() + if err != nil { + t.Fatalf("Unable to get service: %v", err) + } + + tools.PrintResource(t, service) + tools.PrintResource(t, newService) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/siteconnection_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/siteconnection_test.go new file mode 100644 index 000000000000..5bf756074734 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/siteconnection_test.go @@ -0,0 +1,125 @@ +// +build acceptance networking vpnaas + +package vpnaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + networks "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2" + layer3 "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/layer3" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections" +) + +func TestConnectionList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := siteconnections.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list IPSec site connections: %v", err) + } + + allConnections, err := siteconnections.ExtractConnections(allPages) + if err != nil { + t.Fatalf("Unable to extract IPSec site connections: %v", err) + } + + for _, connection := range allConnections { + tools.PrintResource(t, connection) + } +} + +func TestConnectionCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := networks.CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer networks.DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := networks.CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer networks.DeleteSubnet(t, client, subnet.ID) + + router, err := layer3.CreateExternalRouter(t, client) + if err != nil { + t.Fatalf("Unable to create router: %v", err) + } + defer layer3.DeleteRouter(t, client, router.ID) + + // Link router and subnet + aiOpts := routers.AddInterfaceOpts{ + SubnetID: subnet.ID, + } + + _, err = routers.AddInterface(client, router.ID, aiOpts).Extract() + if err != nil { + t.Fatalf("Failed to add interface to router: %v", err) + } + defer func() { + riOpts := routers.RemoveInterfaceOpts{ + SubnetID: subnet.ID, + } + routers.RemoveInterface(client, router.ID, riOpts) + }() + + // Create all needed resources for the connection + service, err := CreateService(t, client, router.ID) + if err != nil { + t.Fatalf("Unable to create service: %v", err) + } + defer DeleteService(t, client, service.ID) + + ikepolicy, err := CreateIKEPolicy(t, client) + if err 
!= nil { + t.Fatalf("Unable to create IKE policy: %v", err) + } + defer DeleteIKEPolicy(t, client, ikepolicy.ID) + + ipsecpolicy, err := CreateIPSecPolicy(t, client) + if err != nil { + t.Fatalf("Unable to create IPSec Policy: %v", err) + } + defer DeleteIPSecPolicy(t, client, ipsecpolicy.ID) + + peerEPGroup, err := CreateEndpointGroup(t, client) + if err != nil { + t.Fatalf("Unable to create Endpoint Group with CIDR endpoints: %v", err) + } + defer DeleteEndpointGroup(t, client, peerEPGroup.ID) + + localEPGroup, err := CreateEndpointGroupWithSubnet(t, client, subnet.ID) + if err != nil { + t.Fatalf("Unable to create Endpoint Group with subnet endpoints: %v", err) + } + defer DeleteEndpointGroup(t, client, localEPGroup.ID) + + conn, err := CreateSiteConnection(t, client, ikepolicy.ID, ipsecpolicy.ID, service.ID, peerEPGroup.ID, localEPGroup.ID) + if err != nil { + t.Fatalf("Unable to create IPSec Site Connection: %v", err) + } + defer DeleteSiteConnection(t, client, conn.ID) + + newConnection, err := siteconnections.Get(client, conn.ID).Extract() + if err != nil { + t.Fatalf("Unable to get connection: %v", err) + } + + tools.PrintResource(t, conn) + tools.PrintResource(t, newConnection) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/vpnaas.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/vpnaas.go new file mode 100644 index 000000000000..2194a4c70e07 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/vpnaas/vpnaas.go @@ -0,0 +1,258 @@ +package vpnaas + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections" +) + +// CreateService will create a Service with a random name and a specified router ID +// An error will be returned if the service could not be created. +func CreateService(t *testing.T, client *gophercloud.ServiceClient, routerID string) (*services.Service, error) { + serviceName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create service %s", serviceName) + + iTrue := true + createOpts := services.CreateOpts{ + Name: serviceName, + AdminStateUp: &iTrue, + RouterID: routerID, + } + service, err := services.Create(client, createOpts).Extract() + if err != nil { + return service, err + } + + t.Logf("Successfully created service %s", serviceName) + + return service, nil +} + +// DeleteService will delete a service with a specified ID. A fatal error +// will occur if the delete was not successful. This works best when used as +// a deferred function. +func DeleteService(t *testing.T, client *gophercloud.ServiceClient, serviceID string) { + t.Logf("Attempting to delete service: %s", serviceID) + + err := services.Delete(client, serviceID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete service %s: %v", serviceID, err) + } + + t.Logf("Service deleted: %s", serviceID) +} + +// CreateIPSecPolicy will create an IPSec Policy with a random name and given +// rule. 
An error will be returned if the rule could not be created. +func CreateIPSecPolicy(t *testing.T, client *gophercloud.ServiceClient) (*ipsecpolicies.Policy, error) { + policyName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create IPSec policy %s", policyName) + + createOpts := ipsecpolicies.CreateOpts{ + Name: policyName, + } + + policy, err := ipsecpolicies.Create(client, createOpts).Extract() + if err != nil { + return policy, err + } + + t.Logf("Successfully created IPSec policy %s", policyName) + + return policy, nil +} + +// CreateIKEPolicy will create an IKE Policy with a random name and given +// rule. An error will be returned if the policy could not be created. +func CreateIKEPolicy(t *testing.T, client *gophercloud.ServiceClient) (*ikepolicies.Policy, error) { + policyName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create IKE policy %s", policyName) + + createOpts := ikepolicies.CreateOpts{ + Name: policyName, + EncryptionAlgorithm: ikepolicies.EncryptionAlgorithm3DES, + PFS: ikepolicies.PFSGroup5, + } + + policy, err := ikepolicies.Create(client, createOpts).Extract() + if err != nil { + return policy, err + } + + t.Logf("Successfully created IKE policy %s", policyName) + + return policy, nil +} + +// DeleteIPSecPolicy will delete an IPSec policy with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeleteIPSecPolicy(t *testing.T, client *gophercloud.ServiceClient, policyID string) { + t.Logf("Attempting to delete IPSec policy: %s", policyID) + + err := ipsecpolicies.Delete(client, policyID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete IPSec policy %s: %v", policyID, err) + } + + t.Logf("Deleted IPSec policy: %s", policyID) +} + +// DeleteIKEPolicy will delete an IKE policy with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeleteIKEPolicy(t *testing.T, client *gophercloud.ServiceClient, policyID string) { + t.Logf("Attempting to delete policy: %s", policyID) + + err := ikepolicies.Delete(client, policyID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete IKE policy %s: %v", policyID, err) + } + + t.Logf("Deleted IKE policy: %s", policyID) +} + +// CreateEndpointGroup will create an endpoint group with a random name. +// An error will be returned if the group could not be created. +func CreateEndpointGroup(t *testing.T, client *gophercloud.ServiceClient) (*endpointgroups.EndpointGroup, error) { + groupName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create group %s", groupName) + + createOpts := endpointgroups.CreateOpts{ + Name: groupName, + Type: endpointgroups.TypeCIDR, + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + } + group, err := endpointgroups.Create(client, createOpts).Extract() + if err != nil { + return group, err + } + + t.Logf("Successfully created group %s", groupName) + + return group, nil +} + +// CreateEndpointGroupWithCIDR will create an endpoint group with a random name and a specified CIDR. +// An error will be returned if the group could not be created. 
+func CreateEndpointGroupWithCIDR(t *testing.T, client *gophercloud.ServiceClient, cidr string) (*endpointgroups.EndpointGroup, error) { + groupName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create group %s", groupName) + + createOpts := endpointgroups.CreateOpts{ + Name: groupName, + Type: endpointgroups.TypeCIDR, + Endpoints: []string{ + cidr, + }, + } + group, err := endpointgroups.Create(client, createOpts).Extract() + if err != nil { + return group, err + } + + t.Logf("Successfully created group %s", groupName) + t.Logf("%v", group) + + return group, nil +} + +// DeleteEndpointGroup will delete an Endpoint group with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeleteEndpointGroup(t *testing.T, client *gophercloud.ServiceClient, epGroupID string) { + t.Logf("Attempting to delete endpoint group: %s", epGroupID) + + err := endpointgroups.Delete(client, epGroupID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete endpoint group %s: %v", epGroupID, err) + } + + t.Logf("Deleted endpoint group: %s", epGroupID) + +} + +// CreateEndpointGroupWithSubnet will create an endpoint group with a random name. +// An error will be returned if the group could not be created. +func CreateEndpointGroupWithSubnet(t *testing.T, client *gophercloud.ServiceClient, subnetID string) (*endpointgroups.EndpointGroup, error) { + groupName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create group %s", groupName) + + createOpts := endpointgroups.CreateOpts{ + Name: groupName, + Type: endpointgroups.TypeSubnet, + Endpoints: []string{ + subnetID, + }, + } + group, err := endpointgroups.Create(client, createOpts).Extract() + if err != nil { + return group, err + } + + t.Logf("Successfully created group %s", groupName) + + return group, nil +} + +// CreateSiteConnection will create an IPSec site connection with a random name and specified +// IKE policy, IPSec policy, service, peer EP group and local EP Group. +// An error will be returned if the connection could not be created. +func CreateSiteConnection(t *testing.T, client *gophercloud.ServiceClient, ikepolicyID string, ipsecpolicyID string, serviceID string, peerEPGroupID string, localEPGroupID string) (*siteconnections.Connection, error) { + connectionName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create IPSec site connection %s", connectionName) + + createOpts := siteconnections.CreateOpts{ + Name: connectionName, + PSK: "secret", + Initiator: siteconnections.InitiatorBiDirectional, + AdminStateUp: gophercloud.Enabled, + IPSecPolicyID: ipsecpolicyID, + PeerEPGroupID: peerEPGroupID, + IKEPolicyID: ikepolicyID, + VPNServiceID: serviceID, + LocalEPGroupID: localEPGroupID, + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + MTU: 1500, + } + connection, err := siteconnections.Create(client, createOpts).Extract() + if err != nil { + return connection, err + } + + t.Logf("Successfully created IPSec Site Connection %s", connectionName) + + return connection, nil +} + +// DeleteSiteConnection will delete an IPSec site connection with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. 
+func DeleteSiteConnection(t *testing.T, client *gophercloud.ServiceClient, siteConnectionID string) { + t.Logf("Attempting to delete site connection: %s", siteConnectionID) + + err := siteconnections.Delete(client, siteConnectionID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete site connection %s: %v", siteConnectionID, err) + } + + t.Logf("Deleted site connection: %s", siteConnectionID) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/networking.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/networking.go new file mode 100644 index 000000000000..2abf4f9180b0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/networking.go @@ -0,0 +1,413 @@ +package v2 + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +// CreateNetwork will create basic network. An error will be returned if the +// network could not be created. +func CreateNetwork(t *testing.T, client *gophercloud.ServiceClient) (*networks.Network, error) { + networkName := tools.RandomString("TESTACC-", 8) + createOpts := networks.CreateOpts{ + Name: networkName, + AdminStateUp: gophercloud.Enabled, + } + + t.Logf("Attempting to create network: %s", networkName) + + network, err := networks.Create(client, createOpts).Extract() + if err != nil { + return network, err + } + + t.Logf("Successfully created network.") + return network, nil +} + +// CreateNetworkWithoutPortSecurity will create a network without port security. +// An error will be returned if the network could not be created. +func CreateNetworkWithoutPortSecurity(t *testing.T, client *gophercloud.ServiceClient) (*networks.Network, error) { + networkName := tools.RandomString("TESTACC-", 8) + networkCreateOpts := networks.CreateOpts{ + Name: networkName, + AdminStateUp: gophercloud.Enabled, + } + + iFalse := false + createOpts := portsecurity.NetworkCreateOptsExt{ + CreateOptsBuilder: networkCreateOpts, + PortSecurityEnabled: &iFalse, + } + + t.Logf("Attempting to create network: %s", networkName) + + network, err := networks.Create(client, createOpts).Extract() + if err != nil { + return network, err + } + + t.Logf("Successfully created network.") + return network, nil +} + +// CreatePort will create a port on the specified subnet. An error will be +// returned if the port could not be created. 
+func CreatePort(t *testing.T, client *gophercloud.ServiceClient, networkID, subnetID string) (*ports.Port, error) { + portName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create port: %s", portName) + + createOpts := ports.CreateOpts{ + NetworkID: networkID, + Name: portName, + AdminStateUp: gophercloud.Enabled, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + } + + port, err := ports.Create(client, createOpts).Extract() + if err != nil { + return port, err + } + + if err := WaitForPortToCreate(client, port.ID, 60); err != nil { + return port, err + } + + newPort, err := ports.Get(client, port.ID).Extract() + if err != nil { + return newPort, err + } + + t.Logf("Successfully created port: %s", portName) + + return newPort, nil +} + +// CreatePortWithNoSecurityGroup will create a port with no security group +// attached. An error will be returned if the port could not be created. +func CreatePortWithNoSecurityGroup(t *testing.T, client *gophercloud.ServiceClient, networkID, subnetID string) (*ports.Port, error) { + portName := tools.RandomString("TESTACC-", 8) + iFalse := false + + t.Logf("Attempting to create port: %s", portName) + + createOpts := ports.CreateOpts{ + NetworkID: networkID, + Name: portName, + AdminStateUp: &iFalse, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + SecurityGroups: &[]string{}, + } + + port, err := ports.Create(client, createOpts).Extract() + if err != nil { + return port, err + } + + if err := WaitForPortToCreate(client, port.ID, 60); err != nil { + return port, err + } + + newPort, err := ports.Get(client, port.ID).Extract() + if err != nil { + return newPort, err + } + + t.Logf("Successfully created port: %s", portName) + + return newPort, nil +} + +// CreatePortWithoutPortSecurity will create a port without port security on the +// specified subnet. An error will be returned if the port could not be created. +func CreatePortWithoutPortSecurity(t *testing.T, client *gophercloud.ServiceClient, networkID, subnetID string) (*ports.Port, error) { + portName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create port: %s", portName) + + portCreateOpts := ports.CreateOpts{ + NetworkID: networkID, + Name: portName, + AdminStateUp: gophercloud.Enabled, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + } + + iFalse := false + createOpts := portsecurity.PortCreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + PortSecurityEnabled: &iFalse, + } + + port, err := ports.Create(client, createOpts).Extract() + if err != nil { + return port, err + } + + if err := WaitForPortToCreate(client, port.ID, 60); err != nil { + return port, err + } + + newPort, err := ports.Get(client, port.ID).Extract() + if err != nil { + return newPort, err + } + + t.Logf("Successfully created port: %s", portName) + + return newPort, nil +} + +// CreateSubnet will create a subnet on the specified Network ID. An error +// will be returned if the subnet could not be created. 
+func CreateSubnet(t *testing.T, client *gophercloud.ServiceClient, networkID string) (*subnets.Subnet, error) { + subnetName := tools.RandomString("TESTACC-", 8) + subnetOctet := tools.RandomInt(1, 250) + subnetCIDR := fmt.Sprintf("192.168.%d.0/24", subnetOctet) + subnetGateway := fmt.Sprintf("192.168.%d.1", subnetOctet) + createOpts := subnets.CreateOpts{ + NetworkID: networkID, + CIDR: subnetCIDR, + IPVersion: 4, + Name: subnetName, + EnableDHCP: gophercloud.Disabled, + GatewayIP: &subnetGateway, + } + + t.Logf("Attempting to create subnet: %s", subnetName) + + subnet, err := subnets.Create(client, createOpts).Extract() + if err != nil { + return subnet, err + } + + t.Logf("Successfully created subnet.") + return subnet, nil +} + +// CreateSubnetWithDefaultGateway will create a subnet on the specified Network +// ID and have Neutron set the gateway by default. An error will be returned if +// the subnet could not be created. +func CreateSubnetWithDefaultGateway(t *testing.T, client *gophercloud.ServiceClient, networkID string) (*subnets.Subnet, error) { + subnetName := tools.RandomString("TESTACC-", 8) + subnetOctet := tools.RandomInt(1, 250) + subnetCIDR := fmt.Sprintf("192.168.%d.0/24", subnetOctet) + createOpts := subnets.CreateOpts{ + NetworkID: networkID, + CIDR: subnetCIDR, + IPVersion: 4, + Name: subnetName, + EnableDHCP: gophercloud.Disabled, + } + + t.Logf("Attempting to create subnet: %s", subnetName) + + subnet, err := subnets.Create(client, createOpts).Extract() + if err != nil { + return subnet, err + } + + t.Logf("Successfully created subnet.") + return subnet, nil +} + +// CreateSubnetWithNoGateway will create a subnet with no gateway on the +// specified Network ID. An error will be returned if the subnet could not be +// created. +func CreateSubnetWithNoGateway(t *testing.T, client *gophercloud.ServiceClient, networkID string) (*subnets.Subnet, error) { + var noGateway = "" + subnetName := tools.RandomString("TESTACC-", 8) + subnetOctet := tools.RandomInt(1, 250) + subnetCIDR := fmt.Sprintf("192.168.%d.0/24", subnetOctet) + dhcpStart := fmt.Sprintf("192.168.%d.10", subnetOctet) + dhcpEnd := fmt.Sprintf("192.168.%d.200", subnetOctet) + createOpts := subnets.CreateOpts{ + NetworkID: networkID, + CIDR: subnetCIDR, + IPVersion: 4, + Name: subnetName, + EnableDHCP: gophercloud.Disabled, + GatewayIP: &noGateway, + AllocationPools: []subnets.AllocationPool{ + { + Start: dhcpStart, + End: dhcpEnd, + }, + }, + } + + t.Logf("Attempting to create subnet: %s", subnetName) + + subnet, err := subnets.Create(client, createOpts).Extract() + if err != nil { + return subnet, err + } + + t.Logf("Successfully created subnet.") + return subnet, nil +} + +// CreateSubnetWithSubnetPool will create a subnet associated with the provided subnetpool on the specified Network ID. +// An error will be returned if the subnet or the subnetpool could not be created. 
+func CreateSubnetWithSubnetPool(t *testing.T, client *gophercloud.ServiceClient, networkID string, subnetPoolID string) (*subnets.Subnet, error) { + subnetName := tools.RandomString("TESTACC-", 8) + subnetOctet := tools.RandomInt(1, 250) + subnetCIDR := fmt.Sprintf("10.%d.0.0/24", subnetOctet) + createOpts := subnets.CreateOpts{ + NetworkID: networkID, + CIDR: subnetCIDR, + IPVersion: 4, + Name: subnetName, + EnableDHCP: gophercloud.Disabled, + SubnetPoolID: subnetPoolID, + } + + t.Logf("Attempting to create subnet: %s", subnetName) + + subnet, err := subnets.Create(client, createOpts).Extract() + if err != nil { + return subnet, err + } + + t.Logf("Successfully created subnet.") + return subnet, nil +} + +// CreateSubnetWithSubnetPoolNoCIDR will create a subnet associated with the +// provided subnetpool on the specified Network ID. +// An error will be returned if the subnet or the subnetpool could not be created. +func CreateSubnetWithSubnetPoolNoCIDR(t *testing.T, client *gophercloud.ServiceClient, networkID string, subnetPoolID string) (*subnets.Subnet, error) { + subnetName := tools.RandomString("TESTACC-", 8) + createOpts := subnets.CreateOpts{ + NetworkID: networkID, + IPVersion: 4, + Name: subnetName, + EnableDHCP: gophercloud.Disabled, + SubnetPoolID: subnetPoolID, + } + + t.Logf("Attempting to create subnet: %s", subnetName) + + subnet, err := subnets.Create(client, createOpts).Extract() + if err != nil { + return subnet, err + } + + t.Logf("Successfully created subnet.") + return subnet, nil +} + +// DeleteNetwork will delete a network with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeleteNetwork(t *testing.T, client *gophercloud.ServiceClient, networkID string) { + t.Logf("Attempting to delete network: %s", networkID) + + err := networks.Delete(client, networkID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete network %s: %v", networkID, err) + } + + t.Logf("Deleted network: %s", networkID) +} + +// DeletePort will delete a port with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeletePort(t *testing.T, client *gophercloud.ServiceClient, portID string) { + t.Logf("Attempting to delete port: %s", portID) + + err := ports.Delete(client, portID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete port %s: %v", portID, err) + } + + t.Logf("Deleted port: %s", portID) +} + +// DeleteSubnet will delete a subnet with a specified ID. A fatal error will +// occur if the delete was not successful. This works best when used as a +// deferred function. +func DeleteSubnet(t *testing.T, client *gophercloud.ServiceClient, subnetID string) { + t.Logf("Attempting to delete subnet: %s", subnetID) + + err := subnets.Delete(client, subnetID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete subnet %s: %v", subnetID, err) + } + + t.Logf("Deleted subnet: %s", subnetID) +} + +func WaitForPortToCreate(client *gophercloud.ServiceClient, portID string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + p, err := ports.Get(client, portID).Extract() + if err != nil { + return false, err + } + + if p.Status == "ACTIVE" || p.Status == "DOWN" { + return true, nil + } + + return false, nil + }) +} + +// PortWithExtraDHCPOpts represents a port with extra DHCP options configuration. 
+type PortWithExtraDHCPOpts struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt +} + +// CreatePortWithExtraDHCPOpts will create a port with DHCP options on the +// specified subnet. An error will be returned if the port could not be created. +func CreatePortWithExtraDHCPOpts(t *testing.T, client *gophercloud.ServiceClient, networkID, subnetID string) (*PortWithExtraDHCPOpts, error) { + portName := tools.RandomString("TESTACC-", 8) + + t.Logf("Attempting to create port: %s", portName) + + portCreateOpts := ports.CreateOpts{ + NetworkID: networkID, + Name: portName, + AdminStateUp: gophercloud.Enabled, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + } + + createOpts := extradhcpopts.CreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + ExtraDHCPOpts: []extradhcpopts.CreateExtraDHCPOpt{ + { + OptName: "test_option_1", + OptValue: "test_value_1", + }, + }, + } + port := &PortWithExtraDHCPOpts{} + + err := ports.Create(client, createOpts).ExtractInto(port) + if err != nil { + return nil, err + } + + if err := WaitForPortToCreate(client, port.ID, 60); err != nil { + return nil, err + } + + err = ports.Get(client, port.ID).ExtractInto(port) + if err != nil { + return port, err + } + + t.Logf("Successfully created port: %s", portName) + + return port, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/networks_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/networks_test.go new file mode 100644 index 000000000000..d7300a35d5df --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/networks_test.go @@ -0,0 +1,179 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestNetworksList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + type networkWithExt struct { + networks.Network + portsecurity.PortSecurityExt + } + + var allNetworks []networkWithExt + + allPages, err := networks.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + if err != nil { + t.Fatalf("Unable to extract networks: %v", err) + } + + for _, network := range allNetworks { + tools.PrintResource(t, network) + } +} + +func TestNetworksExternalList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + choices, err := clients.AcceptanceTestChoicesFromEnv() + if err != nil { + t.Fatalf("Unable to fetch environment information: %s", err) + } + + type networkWithExt struct { + networks.Network + external.NetworkExternalExt + } + + var allNetworks []networkWithExt + + iTrue := true + networkListOpts := networks.ListOpts{ + ID: choices.ExternalNetworkID, + } + listOpts := external.ListOptsExt{ + ListOptsBuilder: networkListOpts, + External: &iTrue, + } + + allPages, err := networks.List(client, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) 
+ } + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + if err != nil { + t.Fatalf("Unable to extract networks: %v", err) + } + + var found bool + for _, network := range allNetworks { + if network.External == true && network.ID == choices.ExternalNetworkID { + found = true + } + } + + th.AssertEquals(t, found, true) + + iFalse := false + networkListOpts = networks.ListOpts{ + ID: choices.ExternalNetworkID, + } + listOpts = external.ListOptsExt{ + ListOptsBuilder: networkListOpts, + External: &iFalse, + } + + allPages, err = networks.List(client, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list networks: %v", err) + } + + v, err := networks.ExtractNetworks(allPages) + th.AssertEquals(t, len(v), 0) +} + +func TestNetworksCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create a network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + tools.PrintResource(t, network) + + newName := tools.RandomString("TESTACC-", 8) + updateOpts := &networks.UpdateOpts{ + Name: newName, + } + + _, err = networks.Update(client, network.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update network: %v", err) + } + + newNetwork, err := networks.Get(client, network.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve network: %v", err) + } + + tools.PrintResource(t, newNetwork) +} + +func TestNetworksPortSecurityCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create a network without port security + network, err := CreateNetworkWithoutPortSecurity(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + var networkWithExtensions struct { + networks.Network + portsecurity.PortSecurityExt + } + + err = networks.Get(client, network.ID).ExtractInto(&networkWithExtensions) + if err != nil { + t.Fatalf("Unable to retrieve network: %v", err) + } + + tools.PrintResource(t, networkWithExtensions) + + iTrue := true + networkUpdateOpts := networks.UpdateOpts{} + updateOpts := portsecurity.NetworkUpdateOptsExt{ + UpdateOptsBuilder: networkUpdateOpts, + PortSecurityEnabled: &iTrue, + } + + err = networks.Update(client, network.ID, updateOpts).ExtractInto(&networkWithExtensions) + if err != nil { + t.Fatalf("Unable to update network: %v", err) + } + + tools.PrintResource(t, networkWithExtensions) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/pkg.go new file mode 100644 index 000000000000..5ec3cc8e833b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/pkg.go @@ -0,0 +1 @@ +package v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/ports_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/ports_test.go new file mode 100644 index 000000000000..5b820d7e57ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/ports_test.go @@ -0,0 +1,466 @@ +// +build acceptance networking + +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + extensions 
"github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +func TestPortsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := ports.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list ports: %v", err) + } + + allPorts, err := ports.ExtractPorts(allPages) + if err != nil { + t.Fatalf("Unable to extract ports: %v", err) + } + + for _, port := range allPorts { + tools.PrintResource(t, port) + } +} + +func TestPortsCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create port + port, err := CreatePort(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + if len(port.SecurityGroups) != 1 { + t.Logf("WARNING: Port did not have a default security group applied") + } + + tools.PrintResource(t, port) + + // Update port + newPortName := tools.RandomString("TESTACC-", 8) + updateOpts := ports.UpdateOpts{ + Name: newPortName, + } + newPort, err := ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) +} + +func TestPortsRemoveSecurityGroups(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create port + port, err := CreatePort(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + // Create a Security Group + group, err := extensions.CreateSecurityGroup(t, client) + if err != nil { + t.Fatalf("Unable to create security group: %v", err) + } + defer extensions.DeleteSecurityGroup(t, client, group.ID) + + // Add the group to the port + updateOpts := ports.UpdateOpts{ + SecurityGroups: &[]string{group.ID}, + } + newPort, err := ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + // Remove the group + updateOpts = ports.UpdateOpts{ + SecurityGroups: &[]string{}, + } + newPort, err = ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) + + if 
len(newPort.SecurityGroups) > 0 { + t.Fatalf("Unable to remove security group from port") + } +} + +func TestPortsDontAlterSecurityGroups(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create a Security Group + group, err := extensions.CreateSecurityGroup(t, client) + if err != nil { + t.Fatalf("Unable to create security group: %v", err) + } + defer extensions.DeleteSecurityGroup(t, client, group.ID) + + // Create port + port, err := CreatePort(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + // Add the group to the port + updateOpts := ports.UpdateOpts{ + SecurityGroups: &[]string{group.ID}, + } + newPort, err := ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + // Update the port again + updateOpts = ports.UpdateOpts{ + Name: "some_port", + } + newPort, err = ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) + + if len(newPort.SecurityGroups) == 0 { + t.Fatalf("Port had security group updated") + } +} + +func TestPortsWithNoSecurityGroup(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create port + port, err := CreatePortWithNoSecurityGroup(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + if len(port.SecurityGroups) != 0 { + t.Fatalf("Port was created with security groups") + } +} + +func TestPortsRemoveAddressPair(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create port + port, err := CreatePort(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + // Add an address pair to the port + updateOpts := ports.UpdateOpts{ + AllowedAddressPairs: &[]ports.AddressPair{ + ports.AddressPair{IPAddress: "192.168.255.10", MACAddress: "aa:bb:cc:dd:ee:ff"}, + }, + } + newPort, err 
:= ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + // Remove the address pair + updateOpts = ports.UpdateOpts{ + AllowedAddressPairs: &[]ports.AddressPair{}, + } + newPort, err = ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) + + if len(newPort.AllowedAddressPairs) > 0 { + t.Fatalf("Unable to remove the address pair") + } +} + +func TestPortsDontUpdateAllowedAddressPairs(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create port + port, err := CreatePort(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + // Add an address pair to the port + updateOpts := ports.UpdateOpts{ + AllowedAddressPairs: &[]ports.AddressPair{ + ports.AddressPair{IPAddress: "192.168.255.10", MACAddress: "aa:bb:cc:dd:ee:ff"}, + }, + } + newPort, err := ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) + + // Remove the address pair + updateOpts = ports.UpdateOpts{ + Name: "some_port", + } + newPort, err = ports.Update(client, port.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) + + if len(newPort.AllowedAddressPairs) == 0 { + t.Fatalf("Address Pairs were removed") + } +} + +func TestPortsPortSecurityCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create port + port, err := CreatePortWithoutPortSecurity(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + defer DeletePort(t, client, port.ID) + + var portWithExt struct { + ports.Port + portsecurity.PortSecurityExt + } + + err = ports.Get(client, port.ID).ExtractInto(&portWithExt) + if err != nil { + t.Fatalf("Unable to create port: %v", err) + } + + tools.PrintResource(t, portWithExt) + + iTrue := true + portUpdateOpts := ports.UpdateOpts{} + updateOpts := portsecurity.PortUpdateOptsExt{ + UpdateOptsBuilder: portUpdateOpts, + PortSecurityEnabled: &iTrue, + } + + err = ports.Update(client, port.ID, updateOpts).ExtractInto(&portWithExt) + if err != nil { + t.Fatalf("Unable to update port: %v", err) + } + + tools.PrintResource(t, portWithExt) +} + +func TestPortsWithExtraDHCPOptsCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) 
+ } + + // Create a Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create a network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create a Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create a subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + // Create a port with extra DHCP options. + port, err := CreatePortWithExtraDHCPOpts(t, client, network.ID, subnet.ID) + if err != nil { + t.Fatalf("Unable to create a port: %v", err) + } + defer DeletePort(t, client, port.ID) + + tools.PrintResource(t, port) + + // Update the port with extra DHCP options. + newPortName := tools.RandomString("TESTACC-", 8) + portUpdateOpts := ports.UpdateOpts{ + Name: newPortName, + } + + existingOpt := port.ExtraDHCPOpts[0] + newOptValue := "test_value_2" + + updateOpts := extradhcpopts.UpdateOptsExt{ + UpdateOptsBuilder: portUpdateOpts, + ExtraDHCPOpts: []extradhcpopts.UpdateExtraDHCPOpt{ + { + OptName: existingOpt.OptName, + OptValue: nil, + }, + { + OptName: "test_option_2", + OptValue: &newOptValue, + }, + }, + } + + newPort := &PortWithExtraDHCPOpts{} + err = ports.Update(client, port.ID, updateOpts).ExtractInto(newPort) + if err != nil { + t.Fatalf("Could not update port: %v", err) + } + + tools.PrintResource(t, newPort) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/subnets_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/subnets_test.go new file mode 100644 index 000000000000..6eb46074e574 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/subnets_test.go @@ -0,0 +1,227 @@ +// +build acceptance networking + +package v2 + +import ( + "fmt" + "strings" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + subnetpools "github.com/gophercloud/gophercloud/acceptance/openstack/networking/v2/extensions/subnetpools" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +func TestSubnetsList(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + allPages, err := subnets.List(client, nil).AllPages() + if err != nil { + t.Fatalf("Unable to list subnets: %v", err) + } + + allSubnets, err := subnets.ExtractSubnets(allPages) + if err != nil { + t.Fatalf("Unable to extract subnets: %v", err) + } + + for _, subnet := range allSubnets { + tools.PrintResource(t, subnet) + } +} + +func TestSubnetCRUD(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnet(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + tools.PrintResource(t, subnet) + + // Update Subnet + newSubnetName := tools.RandomString("TESTACC-", 8) + updateOpts := subnets.UpdateOpts{ + Name: newSubnetName, + } + _, err = subnets.Update(client, subnet.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update subnet: %v", err) + } + + // Get subnet + newSubnet, err := subnets.Get(client, subnet.ID).Extract() + 
if err != nil { + t.Fatalf("Unable to get subnet: %v", err) + } + + tools.PrintResource(t, newSubnet) +} + +func TestSubnetsDefaultGateway(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnetWithDefaultGateway(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + tools.PrintResource(t, subnet) + + if subnet.GatewayIP == "" { + t.Fatalf("A default gateway was not created.") + } + + var noGateway = "" + updateOpts := subnets.UpdateOpts{ + GatewayIP: &noGateway, + } + + newSubnet, err := subnets.Update(client, subnet.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update subnet") + } + + if newSubnet.GatewayIP != "" { + t.Fatalf("Gateway was not updated correctly") + } +} + +func TestSubnetsNoGateway(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create Subnet + subnet, err := CreateSubnetWithNoGateway(t, client, network.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + tools.PrintResource(t, subnet) + + if subnet.GatewayIP != "" { + t.Fatalf("A gateway exists when it shouldn't.") + } + + subnetParts := strings.Split(subnet.CIDR, ".") + newGateway := fmt.Sprintf("%s.%s.%s.1", subnetParts[0], subnetParts[1], subnetParts[2]) + updateOpts := subnets.UpdateOpts{ + GatewayIP: &newGateway, + } + + newSubnet, err := subnets.Update(client, subnet.ID, updateOpts).Extract() + if err != nil { + t.Fatalf("Unable to update subnet") + } + + if newSubnet.GatewayIP == "" { + t.Fatalf("Gateway was not updated correctly") + } +} + +func TestSubnetsWithSubnetPool(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create SubnetPool + subnetPool, err := subnetpools.CreateSubnetPool(t, client) + if err != nil { + t.Fatalf("Unable to create subnet pool: %v", err) + } + defer subnetpools.DeleteSubnetPool(t, client, subnetPool.ID) + + // Create Subnet + subnet, err := CreateSubnetWithSubnetPool(t, client, network.ID, subnetPool.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + tools.PrintResource(t, subnet) + + if subnet.GatewayIP == "" { + t.Fatalf("A subnet pool was not associated.") + } +} + +func TestSubnetsWithSubnetPoolNoCIDR(t *testing.T) { + client, err := clients.NewNetworkV2Client() + if err != nil { + t.Fatalf("Unable to create a network client: %v", err) + } + + // Create Network + network, err := CreateNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create network: %v", err) + } + defer DeleteNetwork(t, client, network.ID) + + // Create SubnetPool + subnetPool, err := 
subnetpools.CreateSubnetPool(t, client) + if err != nil { + t.Fatalf("Unable to create subnet pool: %v", err) + } + defer subnetpools.DeleteSubnetPool(t, client, subnetPool.ID) + + // Create Subnet + subnet, err := CreateSubnetWithSubnetPoolNoCIDR(t, client, network.ID, subnetPool.ID) + if err != nil { + t.Fatalf("Unable to create subnet: %v", err) + } + defer DeleteSubnet(t, client, subnet.ID) + + tools.PrintResource(t, subnet) + + if subnet.GatewayIP == "" { + t.Fatalf("A subnet pool was not associated.") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go new file mode 100644 index 000000000000..bb9745f83538 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/accounts_test.go @@ -0,0 +1,55 @@ +// +build acceptance + +package v1 + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestAccounts(t *testing.T) { + client, err := clients.NewObjectStorageV1Client() + if err != nil { + t.Fatalf("Unable to create client: %v", err) + } + + // Update an account's metadata. + metadata := map[string]string{ + "Gophercloud-Test": "accounts", + } + updateres := accounts.Update(client, accounts.UpdateOpts{Metadata: metadata}) + t.Logf("Update Account Response: %+v\n", updateres) + updateHeaders, err := updateres.Extract() + th.AssertNoErr(t, err) + t.Logf("Update Account Response Headers: %+v\n", updateHeaders) + + // Defer the deletion of the metadata set above. + defer func() { + tempMap := make(map[string]string) + for k := range metadata { + tempMap[k] = "" + } + updateres = accounts.Update(client, accounts.UpdateOpts{Metadata: tempMap}) + th.AssertNoErr(t, updateres.Err) + }() + + // Extract the custom metadata from the 'Get' response. + res := accounts.Get(client, nil) + + h, err := res.Extract() + th.AssertNoErr(t, err) + t.Logf("Get Account Response Headers: %+v\n", h) + + am, err := res.ExtractMetadata() + th.AssertNoErr(t, err) + for k := range metadata { + if am[k] != metadata[strings.Title(k)] { + t.Errorf("Expected custom metadata with key: %s", k) + return + } + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go new file mode 100644 index 000000000000..2b0319412dff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/containers_test.go @@ -0,0 +1,150 @@ +// +build acceptance + +package v1 + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// numContainers is the number of containers to create for testing. +var numContainers = 2 + +func TestContainers(t *testing.T) { + client, err := clients.NewObjectStorageV1Client() + if err != nil { + t.Fatalf("Unable to create client: %v", err) + } + + // Create a slice of random container names. 
+ cNames := make([]string, numContainers) + for i := 0; i < numContainers; i++ { + cNames[i] = tools.RandomString("gophercloud-test-container-", 8) + } + + // Create numContainers containers. + for i := 0; i < len(cNames); i++ { + res := containers.Create(client, cNames[i], nil) + th.AssertNoErr(t, res.Err) + } + // Delete the numContainers containers after function completion. + defer func() { + for i := 0; i < len(cNames); i++ { + res := containers.Delete(client, cNames[i]) + th.AssertNoErr(t, res.Err) + } + }() + + // List the numContainer names that were just created. To just list those, + // the 'prefix' parameter is used. + err = containers.List(client, &containers.ListOpts{Full: true, Prefix: "gophercloud-test-container-"}).EachPage(func(page pagination.Page) (bool, error) { + containerList, err := containers.ExtractInfo(page) + th.AssertNoErr(t, err) + + for _, n := range containerList { + t.Logf("Container: Name [%s] Count [%d] Bytes [%d]", + n.Name, n.Count, n.Bytes) + } + + return true, nil + }) + th.AssertNoErr(t, err) + + // List the info for the numContainer containers that were created. + err = containers.List(client, &containers.ListOpts{Full: false, Prefix: "gophercloud-test-container-"}).EachPage(func(page pagination.Page) (bool, error) { + containerList, err := containers.ExtractNames(page) + th.AssertNoErr(t, err) + for _, n := range containerList { + t.Logf("Container: Name [%s]", n) + } + + return true, nil + }) + th.AssertNoErr(t, err) + + // Update one of the numContainer container metadata. + metadata := map[string]string{ + "Gophercloud-Test": "containers", + } + + updateres := containers.Update(client, cNames[0], &containers.UpdateOpts{Metadata: metadata}) + th.AssertNoErr(t, updateres.Err) + // After the tests are done, delete the metadata that was set. + defer func() { + tempMap := make(map[string]string) + for k := range metadata { + tempMap[k] = "" + } + res := containers.Update(client, cNames[0], &containers.UpdateOpts{Metadata: tempMap}) + th.AssertNoErr(t, res.Err) + }() + + // Retrieve a container's metadata. + getOpts := containers.GetOpts{ + Newest: true, + } + + cm, err := containers.Get(client, cNames[0], getOpts).ExtractMetadata() + th.AssertNoErr(t, err) + for k := range metadata { + if cm[k] != metadata[strings.Title(k)] { + t.Errorf("Expected custom metadata with key: %s", k) + } + } +} + +func TestListAllContainers(t *testing.T) { + client, err := clients.NewObjectStorageV1Client() + if err != nil { + t.Fatalf("Unable to create client: %v", err) + } + + numContainers := 20 + + // Create a slice of random container names. + cNames := make([]string, numContainers) + for i := 0; i < numContainers; i++ { + cNames[i] = tools.RandomString("gophercloud-test-container-", 8) + } + + // Create numContainers containers. + for i := 0; i < len(cNames); i++ { + res := containers.Create(client, cNames[i], nil) + th.AssertNoErr(t, res.Err) + } + // Delete the numContainers containers after function completion. + defer func() { + for i := 0; i < len(cNames); i++ { + res := containers.Delete(client, cNames[i]) + th.AssertNoErr(t, res.Err) + } + }() + + // List all the numContainer names that were just created. To just list those, + // the 'prefix' parameter is used. 
+ allPages, err := containers.List(client, &containers.ListOpts{Full: true, Limit: 5, Prefix: "gophercloud-test-container-"}).AllPages() + th.AssertNoErr(t, err) + containerInfoList, err := containers.ExtractInfo(allPages) + th.AssertNoErr(t, err) + for _, n := range containerInfoList { + t.Logf("Container: Name [%s] Count [%d] Bytes [%d]", + n.Name, n.Count, n.Bytes) + } + th.AssertEquals(t, numContainers, len(containerInfoList)) + + // List the info for all the numContainer containers that were created. + allPages, err = containers.List(client, &containers.ListOpts{Full: false, Limit: 2, Prefix: "gophercloud-test-container-"}).AllPages() + th.AssertNoErr(t, err) + containerNamesList, err := containers.ExtractNames(allPages) + th.AssertNoErr(t, err) + for _, n := range containerNamesList { + t.Logf("Container: Name [%s]", n) + } + th.AssertEquals(t, numContainers, len(containerNamesList)) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go new file mode 100644 index 000000000000..ed3def62a392 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/objects_test.go @@ -0,0 +1,263 @@ +// +build acceptance + +package v1 + +import ( + "bytes" + "strings" + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// numObjects is the number of objects to create for testing. +var numObjects = 2 + +func TestObjects(t *testing.T) { + client, err := clients.NewObjectStorageV1Client() + if err != nil { + t.Fatalf("Unable to create client: %v", err) + } + + // Make a slice of length numObjects to hold the random object names. + oNames := make([]string, numObjects) + for i := 0; i < len(oNames); i++ { + oNames[i] = tools.RandomString("test-object-", 8) + } + + // Create a container to hold the test objects. + cName := tools.RandomString("test-container-", 8) + header, err := containers.Create(client, cName, nil).Extract() + th.AssertNoErr(t, err) + t.Logf("Create object headers: %+v\n", header) + + // Defer deletion of the container until after testing. + defer func() { + res := containers.Delete(client, cName) + th.AssertNoErr(t, res.Err) + }() + + // Create a slice of buffers to hold the test object content. + oContents := make([]*bytes.Buffer, numObjects) + for i := 0; i < numObjects; i++ { + oContents[i] = bytes.NewBuffer([]byte(tools.RandomString("", 10))) + createOpts := objects.CreateOpts{ + Content: oContents[i], + } + res := objects.Create(client, cName, oNames[i], createOpts) + th.AssertNoErr(t, res.Err) + } + // Delete the objects after testing. 
+ defer func() { + for i := 0; i < numObjects; i++ { + res := objects.Delete(client, cName, oNames[i], nil) + th.AssertNoErr(t, res.Err) + } + }() + + // List all created objects + listOpts := objects.ListOpts{ + Full: true, + Prefix: "test-object-", + } + + allPages, err := objects.List(client, cName, listOpts).AllPages() + if err != nil { + t.Fatalf("Unable to list objects: %v", err) + } + + ons, err := objects.ExtractNames(allPages) + if err != nil { + t.Fatalf("Unable to extract objects: %v", err) + } + th.AssertEquals(t, len(ons), len(oNames)) + + ois, err := objects.ExtractInfo(allPages) + if err != nil { + t.Fatalf("Unable to extract object info: %v", err) + } + th.AssertEquals(t, len(ois), len(oNames)) + + // Copy the contents of one object to another. + copyOpts := objects.CopyOpts{ + Destination: cName + "/" + oNames[1], + } + copyres := objects.Copy(client, cName, oNames[0], copyOpts) + th.AssertNoErr(t, copyres.Err) + + // Download one of the objects that was created above. + downloadres := objects.Download(client, cName, oNames[0], nil) + th.AssertNoErr(t, downloadres.Err) + + o1Content, err := downloadres.ExtractContent() + th.AssertNoErr(t, err) + + // Download the another object that was create above. + downloadOpts := objects.DownloadOpts{ + Newest: true, + } + downloadres = objects.Download(client, cName, oNames[1], downloadOpts) + th.AssertNoErr(t, downloadres.Err) + o2Content, err := downloadres.ExtractContent() + th.AssertNoErr(t, err) + + // Compare the two object's contents to test that the copy worked. + th.AssertEquals(t, string(o2Content), string(o1Content)) + + // Update an object's metadata. + metadata := map[string]string{ + "Gophercloud-Test": "objects", + } + + updateOpts := objects.UpdateOpts{ + Metadata: metadata, + } + updateres := objects.Update(client, cName, oNames[0], updateOpts) + th.AssertNoErr(t, updateres.Err) + + // Delete the object's metadata after testing. + defer func() { + tempMap := make(map[string]string) + for k := range metadata { + tempMap[k] = "" + } + res := objects.Update(client, cName, oNames[0], &objects.UpdateOpts{Metadata: tempMap}) + th.AssertNoErr(t, res.Err) + }() + + // Retrieve an object's metadata. + getOpts := objects.GetOpts{ + Newest: true, + } + om, err := objects.Get(client, cName, oNames[0], getOpts).ExtractMetadata() + th.AssertNoErr(t, err) + for k := range metadata { + if om[k] != metadata[strings.Title(k)] { + t.Errorf("Expected custom metadata with key: %s", k) + return + } + } +} + +func TestObjectsListSubdir(t *testing.T) { + client, err := clients.NewObjectStorageV1Client() + if err != nil { + t.Fatalf("Unable to create client: %v", err) + } + + // Create a random subdirectory name. + cSubdir1 := tools.RandomString("test-subdir-", 8) + cSubdir2 := tools.RandomString("test-subdir-", 8) + + // Make a slice of length numObjects to hold the random object names. + oNames1 := make([]string, numObjects) + for i := 0; i < len(oNames1); i++ { + oNames1[i] = cSubdir1 + "/" + tools.RandomString("test-object-", 8) + } + + oNames2 := make([]string, numObjects) + for i := 0; i < len(oNames2); i++ { + oNames2[i] = cSubdir2 + "/" + tools.RandomString("test-object-", 8) + } + + // Create a container to hold the test objects. + cName := tools.RandomString("test-container-", 8) + _, err = containers.Create(client, cName, nil).Extract() + th.AssertNoErr(t, err) + + // Defer deletion of the container until after testing. 
+ defer func() { + t.Logf("Deleting container %s", cName) + res := containers.Delete(client, cName) + th.AssertNoErr(t, res.Err) + }() + + // Create a slice of buffers to hold the test object content. + oContents1 := make([]*bytes.Buffer, numObjects) + for i := 0; i < numObjects; i++ { + oContents1[i] = bytes.NewBuffer([]byte(tools.RandomString("", 10))) + createOpts := objects.CreateOpts{ + Content: oContents1[i], + } + res := objects.Create(client, cName, oNames1[i], createOpts) + th.AssertNoErr(t, res.Err) + } + // Delete the objects after testing. + defer func() { + for i := 0; i < numObjects; i++ { + t.Logf("Deleting object %s", oNames1[i]) + res := objects.Delete(client, cName, oNames1[i], nil) + th.AssertNoErr(t, res.Err) + } + }() + + oContents2 := make([]*bytes.Buffer, numObjects) + for i := 0; i < numObjects; i++ { + oContents2[i] = bytes.NewBuffer([]byte(tools.RandomString("", 10))) + createOpts := objects.CreateOpts{ + Content: oContents2[i], + } + res := objects.Create(client, cName, oNames2[i], createOpts) + th.AssertNoErr(t, res.Err) + } + // Delete the objects after testing. + defer func() { + for i := 0; i < numObjects; i++ { + t.Logf("Deleting object %s", oNames2[i]) + res := objects.Delete(client, cName, oNames2[i], nil) + th.AssertNoErr(t, res.Err) + } + }() + + listOpts := objects.ListOpts{ + Full: true, + Delimiter: "/", + } + + allPages, err := objects.List(client, cName, listOpts).AllPages() + if err != nil { + t.Fatal(err) + } + + allObjects, err := objects.ExtractNames(allPages) + if err != nil { + t.Fatal(err) + } + + t.Logf("%#v\n", allObjects) + expected := []string{cSubdir1, cSubdir2} + for _, e := range expected { + var valid bool + for _, a := range allObjects { + if e+"/" == a { + valid = true + } + } + if !valid { + t.Fatalf("could not find %s in results", e) + } + } + + listOpts = objects.ListOpts{ + Full: true, + Delimiter: "/", + Prefix: cSubdir2, + } + + allPages, err = objects.List(client, cName, listOpts).AllPages() + if err != nil { + t.Fatal(err) + } + + allObjects, err = objects.ExtractNames(allPages) + if err != nil { + t.Fatal(err) + } + + th.AssertEquals(t, allObjects[0], cSubdir2+"/") + t.Logf("%#v\n", allObjects) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/pkg.go new file mode 100644 index 000000000000..b7b1f993d554 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/objectstorage/v1/pkg.go @@ -0,0 +1 @@ +package v1 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go new file mode 100644 index 000000000000..1b4866291636 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/buildinfo_test.go @@ -0,0 +1,20 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestBuildInfo(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + bi, err := buildinfo.Get(client).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved build info: %+v\n", bi) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/common.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/common.go new file mode 100644 index 000000000000..4eec2e3156c8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/common.go @@ -0,0 +1,44 @@ +// +build acceptance + +package v1 + +import ( + "fmt" + "os" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + th "github.com/gophercloud/gophercloud/testhelper" +) + +var template = fmt.Sprintf(` +{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": {}, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "flavor": "%s", + "image": "%s", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } +}`, os.Getenv("OS_FLAVOR_ID"), os.Getenv("OS_IMAGE_ID")) + +func newClient(t *testing.T) *gophercloud.ServiceClient { + ao, err := openstack.AuthOptionsFromEnv() + th.AssertNoErr(t, err) + + client, err := openstack.AuthenticatedClient(ao) + th.AssertNoErr(t, err) + + c, err := openstack.NewOrchestrationV1(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) + th.AssertNoErr(t, err) + return c +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json new file mode 100644 index 000000000000..11cfc8053428 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/hello-compute.json @@ -0,0 +1,13 @@ +{ + "heat_template_version": "2013-05-23", + "resources": { + "compute_instance": { + "type": "OS::Nova::Server", + "properties": { + "flavor": "m1.small", + "image": "cirros-0.3.2-x86_64-disk", + "name": "Single Compute Instance" + } + } + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/pkg.go new file mode 100644 index 000000000000..b7b1f993d554 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/pkg.go @@ -0,0 +1 @@ +package v1 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go new file mode 100644 index 000000000000..4be4bf676a51 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stackevents_test.go @@ -0,0 +1,68 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestStackEvents(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName := "postman_stack_2" + resourceName := "hello_world" + var eventID string + + createOpts := stacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + err = stackevents.List(client, stackName, stack.ID, nil).EachPage(func(page pagination.Page) (bool, error) { + events, err := stackevents.ExtractEvents(page) + th.AssertNoErr(t, err) + t.Logf("listed events: %+v\n", events) + eventID = events[0].ID + return false, nil + }) + th.AssertNoErr(t, err) + + err = stackevents.ListResourceEvents(client, stackName, stack.ID, resourceName, nil).EachPage(func(page pagination.Page) (bool, error) { + resourceEvents, err := stackevents.ExtractEvents(page) + th.AssertNoErr(t, err) + t.Logf("listed resource events: %+v\n", resourceEvents) + return false, nil + }) + th.AssertNoErr(t, err) + + event, err := stackevents.Get(client, stackName, stack.ID, resourceName, eventID).Extract() + th.AssertNoErr(t, err) + t.Logf("retrieved event: %+v\n", event) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go new file mode 100644 index 000000000000..50a0f0631116 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stackresources_test.go @@ -0,0 +1,62 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestStackResources(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName := "postman_stack_2" + + createOpts := stacks.CreateOpts{ + Name: stackName, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + resourceName := "hello_world" + resource, err := stackresources.Get(client, stackName, stack.ID, resourceName).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack resource: %+v\n", resource) + + metadata, err := stackresources.Metadata(client, stackName, stack.ID, resourceName).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack resource metadata: %+v\n", metadata) + + err = stackresources.List(client, stackName, stack.ID, stackresources.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + resources, err := stackresources.ExtractResources(page) + th.AssertNoErr(t, err) + t.Logf("resources: %+v\n", resources) + return false, nil + }) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go new file mode 100644 index 000000000000..c87cc5d00e2a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stacks_test.go @@ -0,0 +1,153 @@ +// +build acceptance + +package v1 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestStacks(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t) + + stackName1 := "gophercloud-test-stack-2" + createOpts := stacks.CreateOpts{ + Name: stackName1, + Template: template, + Timeout: 5, + } + stack, err := stacks.Create(client, createOpts).Extract() + th.AssertNoErr(t, err) + t.Logf("Created stack: %+v\n", stack) + defer func() { + err := stacks.Delete(client, stackName1, stack.ID).ExtractErr() + th.AssertNoErr(t, err) + t.Logf("Deleted stack (%s)", stackName1) + }() + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "CREATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + updateOpts := stacks.UpdateOpts{ + Template: template, + Timeout: 20, + } + err = stacks.Update(client, stackName1, stack.ID, updateOpts).ExtractErr() + th.AssertNoErr(t, err) + err = gophercloud.WaitFor(60, func() (bool, error) { + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + if err != nil { + return false, err + } + if getStack.Status == "UPDATE_COMPLETE" { + return true, nil + } + return false, nil + }) + + t.Logf("Updated stack") + + err = stacks.List(client, nil).EachPage(func(page pagination.Page) (bool, error) { + stackList, err := stacks.ExtractStacks(page) + th.AssertNoErr(t, err) + + t.Logf("Got stack list: %+v\n", stackList) + + return true, nil + }) + th.AssertNoErr(t, err) + + getStack, err := stacks.Get(client, stackName1, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Got stack: %+v\n", getStack) + + abandonedStack, err := stacks.Abandon(client, stackName1, stack.ID).Extract() + th.AssertNoErr(t, err) + t.Logf("Abandonded stack %+v\n", abandonedStack) + th.AssertNoErr(t, err) +} + +// Test using the updated interface +func TestStacksNewTemplateFormat(t *testing.T) { + // Create a provider client for making the HTTP requests. + // See common.go in this directory for more information. 
+ client := newClient(t)
+
+ stackName1 := "gophercloud-test-stack-2"
+ templateOpts := new(stacks.Template)
+ templateOpts.Bin = []byte(template)
+ createOpts := stacks.CreateOpts{
+ Name: stackName1,
+ TemplateOpts: templateOpts,
+ Timeout: 5,
+ }
+ stack, err := stacks.Create(client, createOpts).Extract()
+ th.AssertNoErr(t, err)
+ t.Logf("Created stack: %+v\n", stack)
+ defer func() {
+ err := stacks.Delete(client, stackName1, stack.ID).ExtractErr()
+ th.AssertNoErr(t, err)
+ t.Logf("Deleted stack (%s)", stackName1)
+ }()
+ err = gophercloud.WaitFor(60, func() (bool, error) {
+ getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+ if err != nil {
+ return false, err
+ }
+ if getStack.Status == "CREATE_COMPLETE" {
+ return true, nil
+ }
+ return false, nil
+ })
+
+ updateOpts := stacks.UpdateOpts{
+ TemplateOpts: templateOpts,
+ Timeout: 20,
+ }
+ err = stacks.Update(client, stackName1, stack.ID, updateOpts).ExtractErr()
+ th.AssertNoErr(t, err)
+ err = gophercloud.WaitFor(60, func() (bool, error) {
+ getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+ if err != nil {
+ return false, err
+ }
+ if getStack.Status == "UPDATE_COMPLETE" {
+ return true, nil
+ }
+ return false, nil
+ })
+
+ t.Logf("Updated stack")
+
+ err = stacks.List(client, nil).EachPage(func(page pagination.Page) (bool, error) {
+ stackList, err := stacks.ExtractStacks(page)
+ th.AssertNoErr(t, err)
+
+ t.Logf("Got stack list: %+v\n", stackList)
+
+ return true, nil
+ })
+ th.AssertNoErr(t, err)
+
+ getStack, err := stacks.Get(client, stackName1, stack.ID).Extract()
+ th.AssertNoErr(t, err)
+ t.Logf("Got stack: %+v\n", getStack)
+
+ abandonedStack, err := stacks.Abandon(client, stackName1, stack.ID).Extract()
+ th.AssertNoErr(t, err)
+ t.Logf("Abandonded stack %+v\n", abandonedStack)
+ th.AssertNoErr(t, err)
+} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go new file mode 100644 index 000000000000..9992e0c044e5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/orchestration/v1/stacktemplates_test.go @@ -0,0 +1,75 @@ +// +build acceptance
+
+package v1
+
+import (
+ "testing"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks"
+ "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates"
+ th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+func TestStackTemplates(t *testing.T) {
+ // Create a provider client for making the HTTP requests.
+ // See common.go in this directory for more information.
+ client := newClient(t)
+
+ stackName := "postman_stack_2"
+
+ createOpts := stacks.CreateOpts{
+ Name: stackName,
+ Template: template,
+ Timeout: 5,
+ }
+ stack, err := stacks.Create(client, createOpts).Extract()
+ th.AssertNoErr(t, err)
+ t.Logf("Created stack: %+v\n", stack)
+ defer func() {
+ err := stacks.Delete(client, stackName, stack.ID).ExtractErr()
+ th.AssertNoErr(t, err)
+ t.Logf("Deleted stack (%s)", stackName)
+ }()
+ err = gophercloud.WaitFor(60, func() (bool, error) {
+ getStack, err := stacks.Get(client, stackName, stack.ID).Extract()
+ if err != nil {
+ return false, err
+ }
+ if getStack.Status == "CREATE_COMPLETE" {
+ return true, nil
+ }
+ return false, nil
+ })
+
+ tmpl, err := stacktemplates.Get(client, stackName, stack.ID).Extract()
+ th.AssertNoErr(t, err)
+ t.Logf("retrieved template: %+v\n", tmpl)
+
+ validateOpts := stacktemplates.ValidateOpts{
+ Template: `{"heat_template_version": "2013-05-23",
+ "description": "Simple template to test heat commands",
+ "parameters": {
+ "flavor": {
+ "default": "m1.tiny",
+ "type": "string"
+ }
+ },
+ "resources": {
+ "hello_world": {
+ "type": "OS::Nova::Server",
+ "properties": {
+ "key_name": "heat_key",
+ "flavor": {
+ "get_param": "flavor"
+ },
+ "image": "ad091b52-742f-469e-8f3c-fd81cadf0743",
+ "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n"
+ }
+ }
+ }
+ }`}
+ validatedTemplate, err := stacktemplates.Validate(client, validateOpts).Extract()
+ th.AssertNoErr(t, err)
+ t.Logf("validated template: %+v\n", validatedTemplate)
+} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/pkg.go new file mode 100644 index 000000000000..ef11064a4e23 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/pkg.go @@ -0,0 +1,3 @@ +// +build acceptance
+
+package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/availabilityzones_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/availabilityzones_test.go new file mode 100644 index 000000000000..8841160a2414 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/availabilityzones_test.go @@ -0,0 +1,29 @@ +package v2
+
+import (
+ "testing"
+
+ "github.com/gophercloud/gophercloud/acceptance/clients"
+ "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones"
+)
+
+func TestAvailabilityZonesList(t *testing.T) {
+ client, err := clients.NewSharedFileSystemV2Client()
+ if err != nil {
+ t.Fatalf("Unable to create shared file system client: %v", err)
+ }
+
+ allPages, err := availabilityzones.List(client).AllPages()
+ if err != nil {
+ t.Fatalf("Unable to list availability zones: %v", err)
+ }
+
+ zones, err := availabilityzones.ExtractAvailabilityZones(allPages)
+ if err != nil {
+ t.Fatalf("Unable to extract availability zones: %v", err)
+ }
+
+ if len(zones) == 0 {
+ t.Fatal("At least one availability zone was expected to be found")
+ }
+} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/pkg.go new file mode 100644 index 000000000000..5a5cd2b3f0f3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/pkg.go @@ -0,0 +1,3 @@ +// The v2 package contains acceptance
tests for the Openstack Manila V2 service. + +package v2 diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/securityservices.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/securityservices.go new file mode 100644 index 000000000000..265323d47927 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/securityservices.go @@ -0,0 +1,60 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices" +) + +// CreateSecurityService will create a security service with a random name. An +// error will be returned if the security service was unable to be created. +func CreateSecurityService(t *testing.T, client *gophercloud.ServiceClient) (*securityservices.SecurityService, error) { + if testing.Short() { + t.Skip("Skipping test that requires share network creation in short mode.") + } + + securityServiceName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create security service: %s", securityServiceName) + + createOpts := securityservices.CreateOpts{ + Name: securityServiceName, + Type: "kerberos", + } + + securityService, err := securityservices.Create(client, createOpts).Extract() + if err != nil { + return securityService, err + } + + return securityService, nil +} + +// DeleteSecurityService will delete a security service. An error will occur if +// the security service was unable to be deleted. +func DeleteSecurityService(t *testing.T, client *gophercloud.ServiceClient, securityService *securityservices.SecurityService) { + err := securityservices.Delete(client, securityService.ID).ExtractErr() + if err != nil { + t.Fatalf("Failed to delete security service %s: %v", securityService.ID, err) + } + + t.Logf("Deleted security service: %s", securityService.ID) +} + +// PrintSecurityService will print a security service and all of its attributes. 
+func PrintSecurityService(t *testing.T, securityService *securityservices.SecurityService) {
+ t.Logf("ID: %s", securityService.ID)
+ t.Logf("Project ID: %s", securityService.ProjectID)
+ t.Logf("Domain: %s", securityService.Domain)
+ t.Logf("Status: %s", securityService.Status)
+ t.Logf("Type: %s", securityService.Type)
+ t.Logf("Name: %s", securityService.Name)
+ t.Logf("Description: %s", securityService.Description)
+ t.Logf("DNS IP: %s", securityService.DNSIP)
+ t.Logf("User: %s", securityService.User)
+ t.Logf("Password: %s", securityService.Password)
+ t.Logf("Server: %s", securityService.Server)
+ t.Logf("Created at: %v", securityService.CreatedAt)
+ t.Logf("Updated at: %v", securityService.UpdatedAt)
+} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/securityservices_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/securityservices_test.go new file mode 100644 index 000000000000..ecf8cc9a1c8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/securityservices_test.go @@ -0,0 +1,142 @@ +package v2
+
+import (
+ "testing"
+
+ "github.com/gophercloud/gophercloud/acceptance/clients"
+ "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices"
+)
+
+func TestSecurityServiceCreateDelete(t *testing.T) {
+ client, err := clients.NewSharedFileSystemV2Client()
+ if err != nil {
+ t.Fatalf("Unable to create shared file system client: %v", err)
+ }
+
+ securityService, err := CreateSecurityService(t, client)
+ if err != nil {
+ t.Fatalf("Unable to create security service: %v", err)
+ }
+
+ newSecurityService, err := securityservices.Get(client, securityService.ID).Extract()
+ if err != nil {
+ t.Errorf("Unable to retrieve the security service: %v", err)
+ }
+
+ if newSecurityService.Name != securityService.Name {
+ t.Fatalf("Security service name was expected to be: %s", securityService.Name)
+ }
+
+ PrintSecurityService(t, securityService)
+
+ defer DeleteSecurityService(t, client, securityService)
+}
+
+func TestSecurityServiceList(t *testing.T) {
+ client, err := clients.NewSharedFileSystemV2Client()
+ if err != nil {
+ t.Fatalf("Unable to create a shared file system client: %v", err)
+ }
+
+ allPages, err := securityservices.List(client, securityservices.ListOpts{}).AllPages()
+ if err != nil {
+ t.Fatalf("Unable to retrieve security services: %v", err)
+ }
+
+ allSecurityServices, err := securityservices.ExtractSecurityServices(allPages)
+ if err != nil {
+ t.Fatalf("Unable to extract security services: %v", err)
+ }
+
+ for _, securityService := range allSecurityServices {
+ PrintSecurityService(t, &securityService)
+ }
+}
+
+// The test creates 2 security services and verifies that only the one(s) with
+// a particular name are being listed
+func TestSecurityServiceListFiltering(t *testing.T) {
+ client, err := clients.NewSharedFileSystemV2Client()
+ if err != nil {
+ t.Fatalf("Unable to create a shared file system client: %v", err)
+ }
+
+ securityService, err := CreateSecurityService(t, client)
+ if err != nil {
+ t.Fatalf("Unable to create security service: %v", err)
+ }
+ defer DeleteSecurityService(t, client, securityService)
+
+ securityService, err = CreateSecurityService(t, client)
+ if err != nil {
+ t.Fatalf("Unable to create security service: %v", err)
+ }
+ defer DeleteSecurityService(t, client, securityService)
+
+ options := securityservices.ListOpts{
+ Name: securityService.Name,
+ }
+
+ allPages, err
:= securityservices.List(client, options).AllPages()
+ if err != nil {
+ t.Fatalf("Unable to retrieve security services: %v", err)
+ }
+
+ allSecurityServices, err := securityservices.ExtractSecurityServices(allPages)
+ if err != nil {
+ t.Fatalf("Unable to extract security services: %v", err)
+ }
+
+ for _, listedSecurityService := range allSecurityServices {
+ if listedSecurityService.Name != securityService.Name {
+ t.Fatalf("The name of the security service was expected to be %s", securityService.Name)
+ }
+ PrintSecurityService(t, &listedSecurityService)
+ }
+}
+
+// Create a security service and update the name and description. Get the security
+// service and verify that the name and description have been updated
+func TestSecurityServiceUpdate(t *testing.T) {
+ client, err := clients.NewSharedFileSystemV2Client()
+ if err != nil {
+ t.Fatalf("Unable to create shared file system client: %v", err)
+ }
+
+ securityService, err := CreateSecurityService(t, client)
+ if err != nil {
+ t.Fatalf("Unable to create security service: %v", err)
+ }
+
+ options := securityservices.UpdateOpts{
+ Name: "NewName",
+ Description: "New security service description",
+ Type: "ldap",
+ }
+
+ _, err = securityservices.Update(client, securityService.ID, options).Extract()
+ if err != nil {
+ t.Errorf("Unable to update the security service: %v", err)
+ }
+
+ newSecurityService, err := securityservices.Get(client, securityService.ID).Extract()
+ if err != nil {
+ t.Errorf("Unable to retrieve the security service: %v", err)
+ }
+
+ if newSecurityService.Name != options.Name {
+ t.Fatalf("Security service name was expected to be: %s", options.Name)
+ }
+
+ if newSecurityService.Description != options.Description {
+ t.Fatalf("Security service description was expected to be: %s", options.Description)
+ }
+
+ if newSecurityService.Type != options.Type {
+ t.Fatalf("Security service type was expected to be: %s", options.Type)
+ }
+
+ PrintSecurityService(t, securityService)
+
+ defer DeleteSecurityService(t, client, securityService)
+} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharenetworks.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharenetworks.go new file mode 100644 index 000000000000..b0aefd8577be --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharenetworks.go @@ -0,0 +1,60 @@ +package v2
+
+import (
+ "testing"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/acceptance/tools"
+ "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks"
+)
+
+// CreateShareNetwork will create a share network with a random name. An
+// error will be returned if the share network was unable to be created.
+func CreateShareNetwork(t *testing.T, client *gophercloud.ServiceClient) (*sharenetworks.ShareNetwork, error) {
+ if testing.Short() {
+ t.Skip("Skipping test that requires share network creation in short mode.")
+ }
+
+ shareNetworkName := tools.RandomString("ACPTTEST", 16)
+ t.Logf("Attempting to create share network: %s", shareNetworkName)
+
+ createOpts := sharenetworks.CreateOpts{
+ Name: shareNetworkName,
+ Description: "This is a shared network",
+ }
+
+ shareNetwork, err := sharenetworks.Create(client, createOpts).Extract()
+ if err != nil {
+ return shareNetwork, err
+ }
+
+ return shareNetwork, nil
+}
+
+// DeleteShareNetwork will delete a share network.
An error will occur if
+// the share network was unable to be deleted.
+func DeleteShareNetwork(t *testing.T, client *gophercloud.ServiceClient, shareNetwork *sharenetworks.ShareNetwork) {
+ err := sharenetworks.Delete(client, shareNetwork.ID).ExtractErr()
+ if err != nil {
+ t.Fatalf("Failed to delete share network %s: %v", shareNetwork.ID, err)
+ }
+
+ t.Logf("Deleted share network: %s", shareNetwork.ID)
+}
+
+// PrintShareNetwork will print a share network and all of its attributes.
+func PrintShareNetwork(t *testing.T, sharenetwork *sharenetworks.ShareNetwork) {
+ t.Logf("ID: %s", sharenetwork.ID)
+ t.Logf("Project ID: %s", sharenetwork.ProjectID)
+ t.Logf("Neutron network ID: %s", sharenetwork.NeutronNetID)
+ t.Logf("Neutron sub-network ID: %s", sharenetwork.NeutronSubnetID)
+ t.Logf("Nova network ID: %s", sharenetwork.NovaNetID)
+ t.Logf("Network type: %s", sharenetwork.NetworkType)
+ t.Logf("Segmentation ID: %d", sharenetwork.SegmentationID)
+ t.Logf("CIDR: %s", sharenetwork.CIDR)
+ t.Logf("IP version: %d", sharenetwork.IPVersion)
+ t.Logf("Name: %s", sharenetwork.Name)
+ t.Logf("Description: %s", sharenetwork.Description)
+ t.Logf("Created at: %v", sharenetwork.CreatedAt)
+ t.Logf("Updated at: %v", sharenetwork.UpdatedAt)
+} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharenetworks_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharenetworks_test.go new file mode 100644 index 000000000000..7bf760f8aef2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharenetworks_test.go @@ -0,0 +1,223 @@ +package v2
+
+import (
+ "testing"
+
+ "github.com/gophercloud/gophercloud/acceptance/clients"
+ "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks"
+ "github.com/gophercloud/gophercloud/pagination"
+ th "github.com/gophercloud/gophercloud/testhelper"
+)
+
+func TestShareNetworkCreateDestroy(t *testing.T) {
+ client, err := clients.NewSharedFileSystemV2Client()
+ if err != nil {
+ t.Fatalf("Unable to create shared file system client: %v", err)
+ }
+
+ shareNetwork, err := CreateShareNetwork(t, client)
+ if err != nil {
+ t.Fatalf("Unable to create share network: %v", err)
+ }
+
+ newShareNetwork, err := sharenetworks.Get(client, shareNetwork.ID).Extract()
+ if err != nil {
+ t.Errorf("Unable to retrieve shareNetwork: %v", err)
+ }
+
+ if newShareNetwork.Name != shareNetwork.Name {
+ t.Fatalf("Share network name was expected to be: %s", shareNetwork.Name)
+ }
+
+ PrintShareNetwork(t, shareNetwork)
+
+ defer DeleteShareNetwork(t, client, shareNetwork)
+}
+
+// Create a share network and update the name and description.
Get the share +// network and verify that the name and description have been updated +func TestShareNetworkUpdate(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create shared file system client: %v", err) + } + + shareNetwork, err := CreateShareNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create share network: %v", err) + } + + expectedShareNetwork, err := sharenetworks.Get(client, shareNetwork.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve shareNetwork: %v", err) + } + + options := sharenetworks.UpdateOpts{ + Name: "NewName", + Description: "New share network description", + } + + expectedShareNetwork.Name = options.Name + expectedShareNetwork.Description = options.Description + + _, err = sharenetworks.Update(client, shareNetwork.ID, options).Extract() + if err != nil { + t.Errorf("Unable to update shareNetwork: %v", err) + } + + updatedShareNetwork, err := sharenetworks.Get(client, shareNetwork.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve shareNetwork: %v", err) + } + + // Update time has to be set in order to get the assert equal to pass + expectedShareNetwork.UpdatedAt = updatedShareNetwork.UpdatedAt + + th.CheckDeepEquals(t, expectedShareNetwork, updatedShareNetwork) + + PrintShareNetwork(t, shareNetwork) + + defer DeleteShareNetwork(t, client, shareNetwork) +} + +func TestShareNetworkListDetail(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a shared file system client: %v", err) + } + + allPages, err := sharenetworks.ListDetail(client, sharenetworks.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve share networks: %v", err) + } + + allShareNetworks, err := sharenetworks.ExtractShareNetworks(allPages) + if err != nil { + t.Fatalf("Unable to extract share networks: %v", err) + } + + for _, shareNetwork := range allShareNetworks { + PrintShareNetwork(t, &shareNetwork) + } +} + +// The test creates 2 shared networks and verifies that only the one(s) with +// a particular name are being listed +func TestShareNetworkListFiltering(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a shared file system client: %v", err) + } + + shareNetwork, err := CreateShareNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create share network: %v", err) + } + defer DeleteShareNetwork(t, client, shareNetwork) + + shareNetwork, err = CreateShareNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create share network: %v", err) + } + defer DeleteShareNetwork(t, client, shareNetwork) + + options := sharenetworks.ListOpts{ + Name: shareNetwork.Name, + } + + allPages, err := sharenetworks.ListDetail(client, options).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve share networks: %v", err) + } + + allShareNetworks, err := sharenetworks.ExtractShareNetworks(allPages) + if err != nil { + t.Fatalf("Unable to extract share networks: %v", err) + } + + for _, listedShareNetwork := range allShareNetworks { + if listedShareNetwork.Name != shareNetwork.Name { + t.Fatalf("The name of the share network was expected to be %s", shareNetwork.Name) + } + PrintShareNetwork(t, &listedShareNetwork) + } +} + +func TestShareNetworkListPagination(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a shared file system client: %v", err) + } + + shareNetwork, err := 
CreateShareNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create share network: %v", err) + } + defer DeleteShareNetwork(t, client, shareNetwork) + + shareNetwork, err = CreateShareNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create share network: %v", err) + } + defer DeleteShareNetwork(t, client, shareNetwork) + + count := 0 + + err = sharenetworks.ListDetail(client, sharenetworks.ListOpts{Offset: 0, Limit: 1}).EachPage(func(page pagination.Page) (bool, error) { + count++ + _, err := sharenetworks.ExtractShareNetworks(page) + if err != nil { + t.Fatalf("Failed to extract share networks: %v", err) + return false, err + } + + return true, nil + }) + if err != nil { + t.Fatalf("Unable to retrieve share networks: %v", err) + } + + if count < 2 { + t.Fatal("Expected to get at least 2 pages") + } + +} + +func TestShareNetworkAddRemoveSecurityService(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a shared file system client: %v", err) + } + + securityService, err := CreateSecurityService(t, client) + if err != nil { + t.Fatalf("Unable to create security service: %v", err) + } + defer DeleteSecurityService(t, client, securityService) + + shareNetwork, err := CreateShareNetwork(t, client) + if err != nil { + t.Fatalf("Unable to create share network: %v", err) + } + defer DeleteShareNetwork(t, client, shareNetwork) + + options := sharenetworks.AddSecurityServiceOpts{ + SecurityServiceID: securityService.ID, + } + + _, err = sharenetworks.AddSecurityService(client, shareNetwork.ID, options).Extract() + if err != nil { + t.Errorf("Unable to add security service: %v", err) + } + + removeOptions := sharenetworks.RemoveSecurityServiceOpts{ + SecurityServiceID: securityService.ID, + } + + _, err = sharenetworks.RemoveSecurityService(client, shareNetwork.ID, removeOptions).Extract() + if err != nil { + t.Errorf("Unable to remove security service: %v", err) + } + + PrintShareNetwork(t, shareNetwork) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/shares.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/shares.go new file mode 100644 index 000000000000..9aaf3e5865c6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/shares.go @@ -0,0 +1,139 @@ +package v2 + +import ( + "encoding/json" + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares" +) + +// CreateShare will create a share with a name, and a size of 1Gb. 
An
+// error will be returned if the share could not be created
+func CreateShare(t *testing.T, client *gophercloud.ServiceClient) (*shares.Share, error) {
+ if testing.Short() {
+ t.Skip("Skipping test that requires share creation in short mode.")
+ }
+
+ choices, err := clients.AcceptanceTestChoicesFromEnv()
+ if err != nil {
+ t.Fatalf("Unable to fetch environment information")
+ }
+
+ t.Logf("Share network id %s", choices.ShareNetworkID)
+ createOpts := shares.CreateOpts{
+ Size: 1,
+ Name: "My Test Share",
+ ShareProto: "NFS",
+ ShareNetworkID: choices.ShareNetworkID,
+ }
+
+ share, err := shares.Create(client, createOpts).Extract()
+ if err != nil {
+ return share, err
+ }
+
+ err = waitForStatus(client, share.ID, "available", 600)
+ if err != nil {
+ return share, err
+ }
+
+ return share, nil
+}
+
+// ListShares lists all shares that belong to this tenant's project.
+// An error will be returned if the shares could not be listed.
+func ListShares(t *testing.T, client *gophercloud.ServiceClient) ([]shares.Share, error) {
+ r, err := shares.ListDetail(client, &shares.ListOpts{}).AllPages()
+ if err != nil {
+ return nil, err
+ }
+
+ return shares.ExtractShares(r)
+}
+
+// GrantAccess will grant access to an existing share. A fatal error will occur if
+// this operation fails.
+func GrantAccess(t *testing.T, client *gophercloud.ServiceClient, share *shares.Share) (*shares.AccessRight, error) {
+ return shares.GrantAccess(client, share.ID, shares.GrantAccessOpts{
+ AccessType: "ip",
+ AccessTo: "0.0.0.0/32",
+ AccessLevel: "r",
+ }).Extract()
+}
+
+// RevokeAccess will revoke an existing access of a share. A fatal error will occur
+// if this operation fails.
+func RevokeAccess(t *testing.T, client *gophercloud.ServiceClient, share *shares.Share, accessRight *shares.AccessRight) error {
+ return shares.RevokeAccess(client, share.ID, shares.RevokeAccessOpts{
+ AccessID: accessRight.ID,
+ }).ExtractErr()
+}
+
+// GetAccessRightsSlice will retrieve all access rules assigned to a share.
+// A fatal error will occur if this operation fails.
+func GetAccessRightsSlice(t *testing.T, client *gophercloud.ServiceClient, share *shares.Share) ([]shares.AccessRight, error) {
+ return shares.ListAccessRights(client, share.ID).Extract()
+}
+
+// DeleteShare will delete a share. A fatal error will occur if the share
+// failed to be deleted. This works best when used as a deferred function.
+func DeleteShare(t *testing.T, client *gophercloud.ServiceClient, share *shares.Share) { + err := shares.Delete(client, share.ID).ExtractErr() + if err != nil { + t.Fatalf("Unable to delete share %s: %v", share.ID, err) + } + + t.Logf("Deleted share: %s", share.ID) +} + +// PrintShare prints some information of the share +func PrintShare(t *testing.T, share *shares.Share) { + asJSON, err := json.MarshalIndent(share, "", " ") + if err != nil { + t.Logf("Cannot print the contents of %s", share.ID) + } + + t.Logf("Share %s", string(asJSON)) +} + +// PrintAccessRight prints contents of an access rule +func PrintAccessRight(t *testing.T, accessRight *shares.AccessRight) { + asJSON, err := json.MarshalIndent(accessRight, "", " ") + if err != nil { + t.Logf("Cannot print access rule") + } + + t.Logf("Access rule %s", string(asJSON)) +} + +// ExtendShare extends the capacity of an existing share +func ExtendShare(t *testing.T, client *gophercloud.ServiceClient, share *shares.Share, newSize int) error { + return shares.Extend(client, share.ID, &shares.ExtendOpts{NewSize: newSize}).ExtractErr() +} + +// ShrinkShare shrinks the capacity of an existing share +func ShrinkShare(t *testing.T, client *gophercloud.ServiceClient, share *shares.Share, newSize int) error { + return shares.Shrink(client, share.ID, &shares.ShrinkOpts{NewSize: newSize}).ExtractErr() +} + +func waitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := shares.Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == "error" { + return true, fmt.Errorf("An error occurred") + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/shares_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/shares_test.go new file mode 100644 index 000000000000..fd8885c76641 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/shares_test.go @@ -0,0 +1,150 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares" +) + +func TestShareCreate(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a sharedfs client: %v", err) + } + + share, err := CreateShare(t, client) + if err != nil { + t.Fatalf("Unable to create a share: %v", err) + } + + defer DeleteShare(t, client, share) + + created, err := shares.Get(client, share.ID).Extract() + if err != nil { + t.Errorf("Unable to retrieve share: %v", err) + } + PrintShare(t, created) +} + +func TestShareListDetail(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a sharedfs client: %v", err) + } + + share, err := CreateShare(t, client) + if err != nil { + t.Fatalf("Unable to create a share: %v", err) + } + + defer DeleteShare(t, client, share) + + ss, err := ListShares(t, client) + if err != nil { + t.Fatalf("Unable to list shares: %v", err) + } + + for i := range ss { + PrintShare(t, &ss[i]) + } +} + +func TestGrantAndRevokeAccess(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a sharedfs client: %v", err) + } + + share, err := CreateShare(t, client) 
+ if err != nil { + t.Fatalf("Unable to create a share: %v", err) + } + + defer DeleteShare(t, client, share) + + accessRight, err := GrantAccess(t, client, share) + if err != nil { + t.Fatalf("Unable to grant access: %v", err) + } + + PrintAccessRight(t, accessRight) + + if err = RevokeAccess(t, client, share, accessRight); err != nil { + t.Fatalf("Unable to revoke access: %v", err) + } +} + +func TestListAccessRights(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a sharedfs client: %v", err) + } + + share, err := CreateShare(t, client) + if err != nil { + t.Fatalf("Unable to create a share: %v", err) + } + + defer DeleteShare(t, client, share) + + _, err = GrantAccess(t, client, share) + if err != nil { + t.Fatalf("Unable to grant access: %v", err) + } + + rs, err := GetAccessRightsSlice(t, client, share) + if err != nil { + t.Fatalf("Unable to retrieve list of access rules for share %s: %v", share.ID, err) + } + + if len(rs) != 1 { + t.Fatalf("Unexpected number of access rules for share %s: got %d, expected 1", share.ID, len(rs)) + } + + t.Logf("Share %s has %d access rule(s):", share.ID, len(rs)) + + for _, r := range rs { + PrintAccessRight(t, &r) + } +} + +func TestExtendAndShrink(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a sharedfs client: %v", err) + } + + share, err := CreateShare(t, client) + if err != nil { + t.Fatalf("Unable to create a share: %v", err) + } + + defer DeleteShare(t, client, share) + + err = ExtendShare(t, client, share, 2) + if err != nil { + t.Fatalf("Unable to extend a share: %v", err) + } + + // We need to wait till the Extend operation is done + err = waitForStatus(client, share.ID, "available", 120) + if err != nil { + t.Fatalf("Share status error: %v", err) + } + + t.Logf("Share %s successfuly extended", share.ID) + + err = ShrinkShare(t, client, share, 1) + if err != nil { + t.Fatalf("Unable to shrink a share: %v", err) + } + + // We need to wait till the Shrink operation is done + err = waitForStatus(client, share.ID, "available", 120) + if err != nil { + t.Fatalf("Share status error: %v", err) + } + + t.Logf("Share %s successfuly shrunk", share.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharetypes.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharetypes.go new file mode 100644 index 000000000000..97b44bd0efee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharetypes.go @@ -0,0 +1,56 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes" +) + +// CreateShareType will create a share type with a random name. An +// error will be returned if the share type was unable to be created. 
+func CreateShareType(t *testing.T, client *gophercloud.ServiceClient) (*sharetypes.ShareType, error) { + if testing.Short() { + t.Skip("Skipping test that requires share type creation in short mode.") + } + + shareTypeName := tools.RandomString("ACPTTEST", 16) + t.Logf("Attempting to create share type: %s", shareTypeName) + + extraSpecsOps := sharetypes.ExtraSpecsOpts{ + DriverHandlesShareServers: true, + } + + createOpts := sharetypes.CreateOpts{ + Name: shareTypeName, + IsPublic: false, + ExtraSpecs: extraSpecsOps, + } + + shareType, err := sharetypes.Create(client, createOpts).Extract() + if err != nil { + return shareType, err + } + + return shareType, nil +} + +// DeleteShareType will delete a share type. An error will occur if +// the share type was unable to be deleted. +func DeleteShareType(t *testing.T, client *gophercloud.ServiceClient, shareType *sharetypes.ShareType) { + err := sharetypes.Delete(client, shareType.ID).ExtractErr() + if err != nil { + t.Fatalf("Failed to delete share type %s: %v", shareType.ID, err) + } + + t.Logf("Deleted share type: %s", shareType.ID) +} + +// PrintShareType will print a share type and all of its attributes. +func PrintShareType(t *testing.T, shareType *sharetypes.ShareType) { + t.Logf("Name: %s", shareType.Name) + t.Logf("ID: %s", shareType.ID) + t.Logf("OS share type access is public: %t", shareType.IsPublic) + t.Logf("Extra specs: %#v", shareType.ExtraSpecs) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharetypes_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharetypes_test.go new file mode 100644 index 000000000000..9efe0359d1f1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/sharedfilesystems/v2/sharetypes_test.go @@ -0,0 +1,162 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes" +) + +func TestShareTypeCreateDestroy(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create shared file system client: %v", err) + } + + shareType, err := CreateShareType(t, client) + if err != nil { + t.Fatalf("Unable to create share type: %v", err) + } + + PrintShareType(t, shareType) + + defer DeleteShareType(t, client, shareType) +} + +func TestShareTypeList(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a shared file system client: %v", err) + } + + allPages, err := sharetypes.List(client, sharetypes.ListOpts{}).AllPages() + if err != nil { + t.Fatalf("Unable to retrieve share types: %v", err) + } + + allShareTypes, err := sharetypes.ExtractShareTypes(allPages) + if err != nil { + t.Fatalf("Unable to extract share types: %v", err) + } + + for _, shareType := range allShareTypes { + PrintShareType(t, &shareType) + } +} + +func TestShareTypeGetDefault(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create a shared file system client: %v", err) + } + + shareType, err := sharetypes.GetDefault(client).Extract() + if err != nil { + t.Fatalf("Unable to retrieve the default share type: %v", err) + } + + PrintShareType(t, shareType) +} + +func TestShareTypeExtraSpecs(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create shared file system client: 
%v", err) + } + + shareType, err := CreateShareType(t, client) + if err != nil { + t.Fatalf("Unable to create share type: %v", err) + } + + options := sharetypes.SetExtraSpecsOpts{ + ExtraSpecs: map[string]interface{}{"my_new_key": "my_value"}, + } + + _, err = sharetypes.SetExtraSpecs(client, shareType.ID, options).Extract() + if err != nil { + t.Fatalf("Unable to set extra specs for Share type: %s", shareType.Name) + } + + extraSpecs, err := sharetypes.GetExtraSpecs(client, shareType.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve share type: %s", shareType.Name) + } + + if extraSpecs["driver_handles_share_servers"] != "True" { + t.Fatal("driver_handles_share_servers was expected to be true") + } + + if extraSpecs["my_new_key"] != "my_value" { + t.Fatal("my_new_key was expected to be equal to my_value") + } + + err = sharetypes.UnsetExtraSpecs(client, shareType.ID, "my_new_key").ExtractErr() + if err != nil { + t.Fatalf("Unable to unset extra specs for Share type: %s", shareType.Name) + } + + extraSpecs, err = sharetypes.GetExtraSpecs(client, shareType.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve share type: %s", shareType.Name) + } + + if _, ok := extraSpecs["my_new_key"]; ok { + t.Fatalf("my_new_key was expected to be unset for Share type: %s", shareType.Name) + } + + PrintShareType(t, shareType) + + defer DeleteShareType(t, client, shareType) +} + +func TestShareTypeAccess(t *testing.T) { + client, err := clients.NewSharedFileSystemV2Client() + if err != nil { + t.Fatalf("Unable to create shared file system client: %v", err) + } + + shareType, err := CreateShareType(t, client) + if err != nil { + t.Fatalf("Unable to create share type: %v", err) + } + + options := sharetypes.AccessOpts{ + Project: "9e3a5a44e0134445867776ef53a37605", + } + + err = sharetypes.AddAccess(client, shareType.ID, options).ExtractErr() + if err != nil { + t.Fatalf("Unable to add a new access to a share type: %v", err) + } + + access, err := sharetypes.ShowAccess(client, shareType.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve the access details for a share type: %v", err) + } + + expected := []sharetypes.ShareTypeAccess{{ShareTypeID: shareType.ID, ProjectID: options.Project}} + + if access[0] != expected[0] { + t.Fatal("Share type access is not the same than expected") + } + + err = sharetypes.RemoveAccess(client, shareType.ID, options).ExtractErr() + if err != nil { + t.Fatalf("Unable to remove an access from a share type: %v", err) + } + + access, err = sharetypes.ShowAccess(client, shareType.ID).Extract() + if err != nil { + t.Fatalf("Unable to retrieve the access details for a share type: %v", err) + } + + if len(access) > 0 { + t.Fatalf("No access should be left for the share type: %s", shareType.Name) + } + + PrintShareType(t, shareType) + + defer DeleteShareType(t, client, shareType) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/workflow/v2/workflow.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/workflow/v2/workflow.go new file mode 100644 index 000000000000..12f1cf934eaa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/workflow/v2/workflow.go @@ -0,0 +1,55 @@ +package v2 + +import ( + "fmt" + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// GetEchoWorkflowDefinition 
returns a simple workflow definition that does nothing except a simple "echo" command. +func GetEchoWorkflowDefinition(workflowName string) string { + return fmt.Sprintf(`--- +version: '2.0' + +%s: + description: Simple workflow example + type: direct + + tasks: + test: + action: std.echo output="Hello World!"`, workflowName) +} + +// CreateWorkflow creates a workflow on Mistral API. +// The created workflow is a dummy workflow that performs a simple echo. +func CreateWorkflow(t *testing.T, client *gophercloud.ServiceClient) (*workflows.Workflow, error) { + workflowName := tools.RandomString("workflow_echo_", 5) + + definition := GetEchoWorkflowDefinition(workflowName) + + t.Logf("Attempting to create workflow: %s", workflowName) + + opts := &workflows.CreateOpts{ + Namespace: "some-namespace", + Scope: "private", + Definition: strings.NewReader(definition), + } + workflowList, err := workflows.Create(client, opts).Extract() + if err != nil { + return nil, err + } + th.AssertEquals(t, 1, len(workflowList)) + + workflow := workflowList[0] + + t.Logf("Workflow created: %s", workflowName) + + th.AssertEquals(t, workflowName, workflow.Name) + + return &workflow, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/workflow/v2/workflows_test.go b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/workflow/v2/workflows_test.go new file mode 100644 index 000000000000..f8bda024c458 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/openstack/workflow/v2/workflows_test.go @@ -0,0 +1,19 @@ +package v2 + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/clients" + "github.com/gophercloud/gophercloud/acceptance/tools" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestWorkflowsCreate(t *testing.T) { + client, err := clients.NewWorkflowV2Client() + th.AssertNoErr(t, err) + + workflow, err := CreateWorkflow(t, client) + th.AssertNoErr(t, err) + + tools.PrintResource(t, workflow) +} diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/tools/pkg.go b/vendor/github.com/gophercloud/gophercloud/acceptance/tools/pkg.go new file mode 100644 index 000000000000..f7eca1298a7f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/tools/pkg.go @@ -0,0 +1 @@ +package tools diff --git a/vendor/github.com/gophercloud/gophercloud/acceptance/tools/tools.go b/vendor/github.com/gophercloud/gophercloud/acceptance/tools/tools.go new file mode 100644 index 000000000000..d2fd298d38b0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/acceptance/tools/tools.go @@ -0,0 +1,73 @@ +package tools + +import ( + "crypto/rand" + "encoding/json" + "errors" + mrand "math/rand" + "testing" + "time" +) + +// ErrTimeout is returned if WaitFor takes longer than 300 second to happen. +var ErrTimeout = errors.New("Timed out") + +// WaitFor polls a predicate function once per second to wait for a certain state to arrive. +func WaitFor(predicate func() (bool, error)) error { + for i := 0; i < 300; i++ { + time.Sleep(1 * time.Second) + + satisfied, err := predicate() + if err != nil { + return err + } + if satisfied { + return nil + } + } + return ErrTimeout +} + +// MakeNewPassword generates a new string that's guaranteed to be different than the given one. 
+func MakeNewPassword(oldPass string) string { + randomPassword := RandomString("", 16) + for randomPassword == oldPass { + randomPassword = RandomString("", 16) + } + return randomPassword +} + +// RandomString generates a string of given length, but random content. +// All content will be within the ASCII graphic character set. +// (Implementation from Even Shaw's contribution on +// http://stackoverflow.com/questions/12771930/what-is-the-fastest-way-to-generate-a-long-random-string-in-go). +func RandomString(prefix string, n int) string { + const alphanum = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + var bytes = make([]byte, n) + rand.Read(bytes) + for i, b := range bytes { + bytes[i] = alphanum[b%byte(len(alphanum))] + } + return prefix + string(bytes) +} + +// RandomInt will return a random integer between a specified range. +func RandomInt(min, max int) int { + mrand.Seed(time.Now().Unix()) + return mrand.Intn(max-min) + min +} + +// Elide returns the first bit of its input string with a suffix of "..." if it's longer than +// a comfortable 40 characters. +func Elide(value string) string { + if len(value) > 40 { + return value[0:37] + "..." + } + return value +} + +// PrintResource returns a resource as a readable structure +func PrintResource(t *testing.T, resource interface{}) { + b, _ := json.MarshalIndent(resource, "", " ") + t.Logf(string(b)) +} diff --git a/vendor/github.com/gophercloud/gophercloud/auth_options.go b/vendor/github.com/gophercloud/gophercloud/auth_options.go new file mode 100644 index 000000000000..5e693585c2d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/auth_options.go @@ -0,0 +1,363 @@ +package gophercloud + +/* +AuthOptions stores information needed to authenticate to an OpenStack Cloud. +You can populate one manually, or use a provider's AuthOptionsFromEnv() function +to read relevant information from the standard environment variables. Pass one +to a provider's AuthenticatedClient function to authenticate and obtain a +ProviderClient representing an active session on that provider. + +Its fields are the union of those recognized by each identity implementation and +provider. + +An example of manually providing authentication information: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +An example of using AuthOptionsFromEnv(), where the environment variables can +be read from a file, such as a standard openrc file: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed by + // all of the identity services, it will often be populated by a provider-level + // function. + // + // The IdentityEndpoint is typically referred to as the "auth_url" or + // "OS_AUTH_URL" in the information provided by the cloud operator. + IdentityEndpoint string `json:"-"` + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. 
+ Username string `json:"username,omitempty"` + UserID string `json:"-"` + + Password string `json:"password,omitempty"` + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either are optional. + DomainID string `json:"-"` + DomainName string `json:"name,omitempty"` + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // The same fields are known as project_id and project_name in the Identity + // V3 API, but are collected as TenantID and TenantName here in both cases. + // Some providers allow you to specify a TenantName instead of the TenantId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + // If DomainID or DomainName are provided, they will also apply to TenantName. + // It is not currently possible to authenticate with Username and a Domain + // and scope to a Project in a different Domain by using TenantName. To + // accomplish that, the ProjectID will need to be provided as the TenantID + // option. + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud to + // cache your credentials in memory, and to allow Gophercloud to attempt to + // re-authenticate automatically if/when your token expires. If you set it to + // false, it will not cache these settings, but re-authentication will not be + // possible. This setting defaults to false. + // + // NOTE: The reauth function will try to re-authenticate endlessly if left + // unchecked. The way to limit the number of attempts is to provide a custom + // HTTP client to the provider client and provide a transport that implements + // the RoundTripper interface and stores the number of failed retries. For an + // example of this, see here: + // https://github.com/rackspace/rack/blob/1.0.0/auth/clients.go#L311 + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` + + // Scope determines the scoping of the authentication request. + Scope *AuthScope `json:"-"` +} + +// AuthScope allows a created token to be limited to a specific domain or project. +type AuthScope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +// ToTokenV2CreateMap allows AuthOptions to satisfy the AuthOptionsBuilder +// interface in the v2 tokens package +func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + // Populate the request map. 
+ authMap := make(map[string]interface{}) + + if opts.Username != "" { + if opts.Password != "" { + authMap["passwordCredentials"] = map[string]interface{}{ + "username": opts.Username, + "password": opts.Password, + } + } else { + return nil, ErrMissingInput{Argument: "Password"} + } + } else if opts.TokenID != "" { + authMap["token"] = map[string]interface{}{ + "id": opts.TokenID, + } + } else { + return nil, ErrMissingInput{Argument: "Username"} + } + + if opts.TenantID != "" { + authMap["tenantId"] = opts.TenantID + } + if opts.TenantName != "" { + authMap["tenantName"] = opts.TenantName + } + + return map[string]interface{}{"auth": authMap}, nil +} + +func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + type domainReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + } + + type projectReq struct { + Domain *domainReq `json:"domain,omitempty"` + Name *string `json:"name,omitempty"` + ID *string `json:"id,omitempty"` + } + + type userReq struct { + ID *string `json:"id,omitempty"` + Name *string `json:"name,omitempty"` + Password string `json:"password"` + Domain *domainReq `json:"domain,omitempty"` + } + + type passwordReq struct { + User userReq `json:"user"` + } + + type tokenReq struct { + ID string `json:"id"` + } + + type identityReq struct { + Methods []string `json:"methods"` + Password *passwordReq `json:"password,omitempty"` + Token *tokenReq `json:"token,omitempty"` + } + + type authReq struct { + Identity identityReq `json:"identity"` + } + + type request struct { + Auth authReq `json:"auth"` + } + + // Populate the request structure based on the provided arguments. Create and return an error + // if insufficient or incompatible information is present. + var req request + + if opts.Password == "" { + if opts.TokenID != "" { + // Because we aren't using password authentication, it's an error to also provide any of the user-based authentication + // parameters. + if opts.Username != "" { + return nil, ErrUsernameWithToken{} + } + if opts.UserID != "" { + return nil, ErrUserIDWithToken{} + } + if opts.DomainID != "" { + return nil, ErrDomainIDWithToken{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithToken{} + } + + // Configure the request for Token authentication. + req.Auth.Identity.Methods = []string{"token"} + req.Auth.Identity.Token = &tokenReq{ + ID: opts.TokenID, + } + } else { + // If no password or token ID are available, authentication can't continue. + return nil, ErrMissingPassword{} + } + } else { + // Password authentication. + req.Auth.Identity.Methods = []string{"password"} + + // At least one of Username and UserID must be specified. + if opts.Username == "" && opts.UserID == "" { + return nil, ErrUsernameOrUserID{} + } + + if opts.Username != "" { + // If Username is provided, UserID may not be provided. + if opts.UserID != "" { + return nil, ErrUsernameOrUserID{} + } + + // Either DomainID or DomainName must also be specified. + if opts.DomainID == "" && opts.DomainName == "" { + return nil, ErrDomainIDOrDomainName{} + } + + if opts.DomainID != "" { + if opts.DomainName != "" { + return nil, ErrDomainIDOrDomainName{} + } + + // Configure the request for Username and Password authentication with a DomainID. 
+ req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{ID: &opts.DomainID}, + }, + } + } + + if opts.DomainName != "" { + // Configure the request for Username and Password authentication with a DomainName. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ + Name: &opts.Username, + Password: opts.Password, + Domain: &domainReq{Name: &opts.DomainName}, + }, + } + } + } + + if opts.UserID != "" { + // If UserID is specified, neither DomainID nor DomainName may be. + if opts.DomainID != "" { + return nil, ErrDomainIDWithUserID{} + } + if opts.DomainName != "" { + return nil, ErrDomainNameWithUserID{} + } + + // Configure the request for UserID and Password authentication. + req.Auth.Identity.Password = &passwordReq{ + User: userReq{ID: &opts.UserID, Password: opts.Password}, + } + } + } + + b, err := BuildRequestBody(req, "") + if err != nil { + return nil, err + } + + if len(scope) != 0 { + b["auth"].(map[string]interface{})["scope"] = scope + } + + return b, nil +} + +func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + // For backwards compatibility. + // If AuthOptions.Scope was not set, try to determine it. + // This works well for common scenarios. + if opts.Scope == nil { + opts.Scope = new(AuthScope) + if opts.TenantID != "" { + opts.Scope.ProjectID = opts.TenantID + } else { + if opts.TenantName != "" { + opts.Scope.ProjectName = opts.TenantName + opts.Scope.DomainID = opts.DomainID + opts.Scope.DomainName = opts.DomainName + } + } + } + + if opts.Scope.ProjectName != "" { + // ProjectName provided: either DomainID or DomainName must also be supplied. + // ProjectID may not be supplied. + if opts.Scope.DomainID == "" && opts.Scope.DomainName == "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + if opts.Scope.ProjectID != "" { + return nil, ErrScopeProjectIDOrProjectName{} + } + + if opts.Scope.DomainID != "" { + // ProjectName + DomainID + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"id": &opts.Scope.DomainID}, + }, + }, nil + } + + if opts.Scope.DomainName != "" { + // ProjectName + DomainName + return map[string]interface{}{ + "project": map[string]interface{}{ + "name": &opts.Scope.ProjectName, + "domain": map[string]interface{}{"name": &opts.Scope.DomainName}, + }, + }, nil + } + } else if opts.Scope.ProjectID != "" { + // ProjectID provided. ProjectName, DomainID, and DomainName may not be provided. + if opts.Scope.DomainID != "" { + return nil, ErrScopeProjectIDAlone{} + } + if opts.Scope.DomainName != "" { + return nil, ErrScopeProjectIDAlone{} + } + + // ProjectID + return map[string]interface{}{ + "project": map[string]interface{}{ + "id": &opts.Scope.ProjectID, + }, + }, nil + } else if opts.Scope.DomainID != "" { + // DomainID provided. ProjectID, ProjectName, and DomainName may not be provided. 
+ if opts.Scope.DomainName != "" { + return nil, ErrScopeDomainIDOrDomainName{} + } + + // DomainID + return map[string]interface{}{ + "domain": map[string]interface{}{ + "id": &opts.Scope.DomainID, + }, + }, nil + } else if opts.Scope.DomainName != "" { + // DomainName + return map[string]interface{}{ + "domain": map[string]interface{}{ + "name": &opts.Scope.DomainName, + }, + }, nil + } + + return nil, nil +} + +func (opts AuthOptions) CanReauth() bool { + return opts.AllowReauth +} diff --git a/vendor/github.com/gophercloud/gophercloud/doc.go b/vendor/github.com/gophercloud/gophercloud/doc.go new file mode 100644 index 000000000000..30067aa35275 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/doc.go @@ -0,0 +1,93 @@ +/* +Package gophercloud provides a multi-vendor interface to OpenStack-compatible +clouds. The library has a three-level hierarchy: providers, services, and +resources. + +Authenticating with Providers + +Provider structs represent the cloud providers that offer and manage a +collection of services. You will generally want to create one Provider +client per OpenStack cloud. + +Use your OpenStack credentials to create a Provider client. The +IdentityEndpoint is typically refered to as "auth_url" or "OS_AUTH_URL" in +information provided by the cloud operator. Additionally, the cloud may refer to +TenantID or TenantName as project_id and project_name. Credentials are +specified like so: + + opts := gophercloud.AuthOptions{ + IdentityEndpoint: "https://openstack.example.com:5000/v2.0", + Username: "{username}", + Password: "{password}", + TenantID: "{tenant_id}", + } + + provider, err := openstack.AuthenticatedClient(opts) + +You may also use the openstack.AuthOptionsFromEnv() helper function. This +function reads in standard environment variables frequently found in an +OpenStack `openrc` file. Again note that Gophercloud currently uses "tenant" +instead of "project". + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) + +Service Clients + +Service structs are specific to a provider and handle all of the logic and +operations for a particular OpenStack service. Examples of services include: +Compute, Object Storage, Block Storage. In order to define one, you need to +pass in the parent provider, like so: + + opts := gophercloud.EndpointOpts{Region: "RegionOne"} + + client := openstack.NewComputeV2(provider, opts) + +Resources + +Resource structs are the domain models that services make use of in order +to work with and represent the state of API resources: + + server, err := servers.Get(client, "{serverId}").Extract() + +Intermediate Result structs are returned for API operations, which allow +generic access to the HTTP headers, response body, and any errors associated +with the network transaction. To turn a result into a usable resource struct, +you must call the Extract method which is chained to the response, or an +Extract function from an applicable extension: + + result := servers.Get(client, "{serverId}") + + // Attempt to extract the disk configuration from the OS-DCF disk config + // extension: + config, err := diskconfig.ExtractGet(result) + +All requests that enumerate a collection return a Pager struct that is used to +iterate through the results one page at a time. 
Use the EachPage method on that +Pager to handle each successive Page in a closure, then use the appropriate +extraction method from that request's package to interpret that Page as a slice +of results: + + err := servers.List(client, nil).EachPage(func (page pagination.Page) (bool, error) { + s, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + // Handle the []servers.Server slice. + + // Return "false" or an error to prematurely stop fetching new pages. + return true, nil + }) + +If you want to obtain the entire collection of pages without doing any +intermediary processing on each page, you can use the AllPages method: + + allPages, err := servers.List(client, nil).AllPages() + allServers, err := servers.ExtractServers(allPages) + +This top-level package contains utility functions and data types that are used +throughout the provider and service packages. Of particular note for end users +are the AuthOptions and EndpointOpts structs. +*/ +package gophercloud diff --git a/vendor/github.com/gophercloud/gophercloud/docs/FAQ.md b/vendor/github.com/gophercloud/gophercloud/docs/FAQ.md new file mode 100644 index 000000000000..88a366a288b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/FAQ.md @@ -0,0 +1,148 @@ +# Tips + +## Implementing default logging and re-authentication attempts + +You can implement custom logging and/or limit re-auth attempts by creating a custom HTTP client +like the following and setting it as the provider client's HTTP Client (via the +`gophercloud.ProviderClient.HTTPClient` field): + +```go +//... + +// LogRoundTripper satisfies the http.RoundTripper interface and is used to +// customize the default Gophercloud RoundTripper to allow for logging. +type LogRoundTripper struct { + rt http.RoundTripper + numReauthAttempts int +} + +// newHTTPClient return a custom HTTP client that allows for logging relevant +// information before and after the HTTP request. +func newHTTPClient() http.Client { + return http.Client{ + Transport: &LogRoundTripper{ + rt: http.DefaultTransport, + }, + } +} + +// RoundTrip performs a round-trip HTTP request and logs relevant information about it. +func (lrt *LogRoundTripper) RoundTrip(request *http.Request) (*http.Response, error) { + glog.Infof("Request URL: %s\n", request.URL) + + response, err := lrt.rt.RoundTrip(request) + if response == nil { + return nil, err + } + + if response.StatusCode == http.StatusUnauthorized { + if lrt.numReauthAttempts == 3 { + return response, fmt.Errorf("Tried to re-authenticate 3 times with no success.") + } + lrt.numReauthAttempts++ + } + + glog.Debugf("Response Status: %s\n", response.Status) + + return response, nil +} + +endpoint := "https://127.0.0.1/auth" +pc := openstack.NewClient(endpoint) +pc.HTTPClient = newHTTPClient() + +//... +``` + + +## Implementing custom objects + +OpenStack request/response objects may differ among variable names or types. + +### Custom request objects + +To pass custom options to a request, implement the desired `OptsBuilder` interface. For +example, to pass in + +```go +type MyCreateServerOpts struct { + Name string + Size int +} +``` + +to `servers.Create`, simply implement the `servers.CreateOptsBuilder` interface: + +```go +func (o MyCreateServeropts) ToServerCreateMap() (map[string]interface{}, error) { + return map[string]interface{}{ + "name": o.Name, + "size": o.Size, + }, nil +} +``` + +create an instance of your custom options object, and pass it to `servers.Create`: + +```go +// ... 
+myOpts := MyCreateServerOpts{ + Name: "s1", + Size: "100", +} +server, err := servers.Create(computeClient, myOpts).Extract() +// ... +``` + +### Custom response objects + +Some OpenStack services have extensions. Extensions that are supported in Gophercloud can be +combined to create a custom object: + +```go +// ... +type MyVolume struct { + volumes.Volume + tenantattr.VolumeExt +} + +var v struct { + MyVolume `json:"volume"` +} + +err := volumes.Get(client, volID).ExtractInto(&v) +// ... +``` + +## Overriding default `UnmarshalJSON` method + +For some response objects, a field may be a custom type or may be allowed to take on +different types. In these cases, overriding the default `UnmarshalJSON` method may be +necessary. To do this, declare the JSON `struct` field tag as "-" and create an `UnmarshalJSON` +method on the type: + +```go +// ... +type MyVolume struct { + ID string `json: "id"` + TimeCreated time.Time `json: "-"` +} + +func (r *MyVolume) UnmarshalJSON(b []byte) error { + type tmp MyVolume + var s struct { + tmp + TimeCreated gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.TimeCreated = time.Time(s.CreatedAt) + + return err +} +// ... +``` diff --git a/vendor/github.com/gophercloud/gophercloud/docs/MIGRATING.md b/vendor/github.com/gophercloud/gophercloud/docs/MIGRATING.md new file mode 100644 index 000000000000..ef1a2bb900ac --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/MIGRATING.md @@ -0,0 +1,32 @@ +# Compute + +## Floating IPs + +* `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingip` is now `github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips` +* `floatingips.Associate` and `floatingips.Disassociate` have been removed. +* `floatingips.DisassociateOpts` is now required to disassociate a Floating IP. + +## Security Groups + +* `secgroups.AddServerToGroup` is now `secgroups.AddServer`. +* `secgroups.RemoveServerFromGroup` is now `secgroups.RemoveServer`. + +## Servers + +* `servers.Reboot` now requires a `servers.RebootOpts` struct: + + ```golang + rebootOpts := servers.RebootOpts{ + Type: servers.SoftReboot, + } + res := servers.Reboot(client, server.ID, rebootOpts) + ``` + +# Identity + +## V3 + +### Tokens + +* `Token.ExpiresAt` is now of type `gophercloud.JSONRFC3339Milli` instead of + `time.Time` diff --git a/vendor/github.com/gophercloud/gophercloud/docs/STYLEGUIDE.md b/vendor/github.com/gophercloud/gophercloud/docs/STYLEGUIDE.md new file mode 100644 index 000000000000..22a290094129 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/STYLEGUIDE.md @@ -0,0 +1,79 @@ + +## On Pull Requests + +- Please make sure to read our [contributing guide](/.github/CONTRIBUTING.md). + +- Before you start a PR there needs to be a Github issue and a discussion about it + on that issue with a core contributor, even if it's just a 'SGTM'. + +- A PR's description must reference the issue it closes with a `For ` (e.g. For #293). + +- A PR's description must contain link(s) to the line(s) in the OpenStack + source code (on Github) that prove(s) the PR code to be valid. Links to documentation + are not good enough. The link(s) should be to a non-`master` branch. 
For example, + a pull request implementing the creation of a Neutron v2 subnet might put the + following link in the description: + + https://github.com/openstack/neutron/blob/stable/mitaka/neutron/api/v2/attributes.py#L749 + + From that link, a reviewer (or user) can verify the fields in the request/response + objects in the PR. + +- A PR that is in-progress should have `[wip]` in front of the PR's title. When + ready for review, remove the `[wip]` and ping a core contributor with an `@`. + +- Forcing PRs to be small can have the effect of users submitting PRs in a hierarchical chain, with + one depending on the next. If a PR depends on another one, it should have a [Pending #PRNUM] + prefix in the PR title. In addition, it will be the PR submitter's responsibility to remove the + [Pending #PRNUM] tag once the PR has been updated with the merged, dependent PR. That will + let reviewers know it is ready to review. + +- A PR should be small. Even if you intend on implementing an entire + service, a PR should only be one route of that service + (e.g. create server or get server, but not both). + +- Unless explicitly asked, do not squash commits in the middle of a review; only + append. It makes it difficult for the reviewer to see what's changed from one + review to the next. + +- See [#583](https://github.com/gophercloud/gophercloud/issues/583) as an example of a + well-formatted issue which contains all relevant information we need to review and approve. + +## On Code + +- In re design: follow as closely as is reasonable the code already in the library. + Most operations (e.g. create, delete) admit the same design. + +- Unit tests and acceptance (integration) tests must be written to cover each PR. + Tests for operations with several options (e.g. list, create) should include all + the options in the tests. This will allow users to verify an operation on their + own infrastructure and see an example of usage. + +- If in doubt, ask in-line on the PR. + +### File Structure + +- The following should be used in most cases: + + - `requests.go`: contains all the functions that make HTTP requests and the + types associated with the HTTP request (parameters for URL, body, etc) + - `results.go`: contains all the response objects and their methods + - `urls.go`: contains the endpoints to which the requests are made + +### Naming + +- For methods on a type in `results.go`, the receiver should be named `r` and the + variable into which it will be unmarshalled `s`. + +- Functions in `requests.go`, with the exception of functions that return a + `pagination.Pager`, should be named returns of the name `r`. + +- Functions in `requests.go` that accept request bodies should accept as their + last parameter an `interface` named `OptsBuilder` (eg `CreateOptsBuilder`). + This `interface` should have at the least a method named `ToMap` + (eg `ToPortCreateMap`). + +- Functions in `requests.go` that accept query strings should accept as their + last parameter an `interface` named `OptsBuilder` (eg `ListOptsBuilder`). + This `interface` should have at the least a method named `ToQuery` + (eg `ToServerListQuery`). 
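To make the file-structure and naming conventions above more concrete, here is a minimal, hypothetical sketch (a made-up `widgets` resource, not part of any real Gophercloud package) of how `requests.go` and `results.go` typically follow them; it is illustrative only, not the style guide's own example:

```go
package widgets

import "github.com/gophercloud/gophercloud"

// CreateOptsBuilder allows extensions to add additional parameters to the
// Create request (the "OptsBuilder" interface convention).
type CreateOptsBuilder interface {
	ToWidgetCreateMap() (map[string]interface{}, error)
}

// CreateOpts provides options used to create a widget.
type CreateOpts struct {
	Name string `json:"name"`
}

// ToWidgetCreateMap formats a CreateOpts into a create request body.
func (opts CreateOpts) ToWidgetCreateMap() (map[string]interface{}, error) {
	return gophercloud.BuildRequestBody(opts, "widget")
}

// Create requests a new widget. Note the named return value r, per the
// guideline above.
func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
	b, err := opts.ToWidgetCreateMap()
	if err != nil {
		r.Err = err
		return
	}
	_, r.Err = client.Post(client.ServiceURL("widgets"), &b, &r.Body, &gophercloud.RequestOpts{
		OkCodes: []int{201},
	})
	return
}

// Widget represents a single widget resource (this would live in results.go).
type Widget struct {
	ID   string `json:"id"`
	Name string `json:"name"`
}

// CreateResult is the response of a Create request.
type CreateResult struct {
	gophercloud.Result
}

// Extract interprets the result as a Widget. The receiver is named r and the
// unmarshalling target s, per the guideline above.
func (r CreateResult) Extract() (*Widget, error) {
	var s struct {
		Widget *Widget `json:"widget"`
	}
	err := r.ExtractInto(&s)
	return s.Widget, err
}
```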
diff --git a/vendor/github.com/gophercloud/gophercloud/docs/assets/openlab.png b/vendor/github.com/gophercloud/gophercloud/docs/assets/openlab.png new file mode 100644 index 000000000000..63077ed27561 Binary files /dev/null and b/vendor/github.com/gophercloud/gophercloud/docs/assets/openlab.png differ diff --git a/vendor/github.com/gophercloud/gophercloud/docs/assets/vexxhost.png b/vendor/github.com/gophercloud/gophercloud/docs/assets/vexxhost.png new file mode 100644 index 000000000000..846db18f09a0 Binary files /dev/null and b/vendor/github.com/gophercloud/gophercloud/docs/assets/vexxhost.png differ diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/doc.go b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/doc.go new file mode 100644 index 000000000000..bbf39c542875 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/doc.go @@ -0,0 +1,13 @@ +/* +Package NAME manages and retrieves RESOURCE in the OpenStack SERVICE Service. + +Example to List RESOURCE + +Example to Create a RESOURCE + +Example to Update a RESOURCE + +Example to Delete a RESOURCE + +*/ +package RESOURCE diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/requests.go b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/requests.go new file mode 100644 index 000000000000..1c4bb4d3e90a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/requests.go @@ -0,0 +1,105 @@ +package RESOURCE + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToResourceListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { +} + +// ToResourceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToResourceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List retrieves a list of RESOURCES. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToResourceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ResourcePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details of a RESOURCE. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToResourceCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a RESOURCE. +type CreateOpts struct { +} + +// ToResourceCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToResourceCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "resource") +} + +// Create creates a new RESOURCE. 
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToResourceCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// Delete deletes a RESOURCE. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToResourceUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents parameters to update a RESOURCE. +type UpdateOpts struct { +} + +// ToUpdateCreateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToResourceUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "resource") +} + +// Update modifies the attributes of a RESOURCE. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToResourceUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/results.go b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/results.go new file mode 100644 index 000000000000..88272e1e1cf6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/results.go @@ -0,0 +1,83 @@ +package RESOURCE + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// RESOURCE represents... +type Resource struct { +} + +type commonResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a RESOURCE. +type GetResult struct { + commonResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a RESOURCE. +type CreateResult struct { + commonResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the result of an Update request. Call its Extract method to +// interpret it as a RESOURCE. +type UpdateResult struct { + commonResult +} + +// ResourcePage is a single page of RESOURCE results. +type ResourcePage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of RESOURCES contains any results. +func (r ResourcePage) IsEmpty() (bool, error) { + resources, err := ExtractResources(r) + return len(resources) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r ResourcePage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractResources returns a slice of Resources contained in a single page of +// results. 
+func ExtractResources(r pagination.Page) ([]Resource, error) { + var s struct { + Resources []Resource `json:"resources"` + } + err := (r.(ResourcePage)).ExtractInto(&s) + return s.Resources, err +} + +// Extract interprets any commonResult as a Resource. +func (r commonResult) Extract() (*Resource, error) { + var s struct { + Resource *Resource `json:"resource"` + } + err := r.ExtractInto(&s) + return s.Resource, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/testing/fixtures.go new file mode 100644 index 000000000000..66e2a202a2e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/testing/fixtures.go @@ -0,0 +1,118 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/service/vN/resources" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListResult provides a single page of RESOURCE results. +const ListResult = ` +{ +} +` + +// GetResult provides a Get result. +const GetResult = ` +{ +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ +} +` + +// UpdateRequest provides the input to as Update request. +const UpdateRequest = ` +{ +} +` + +// UpdateResult provides an update result. +const UpdateResult = ` +{ +} +` + +// FirstResource is the first resource in the List request. +var FirstResource = resources.Resource{} + +// SecondResource is the second resource in the List request. +var SecondResource = resources.Resource{} + +// SecondResourceUpdated is how SecondResource should look after an Update. +var SecondResourceUpdated = resources.Resource{} + +// ExpectedResourcesSlice is the slice of resources expected to be returned from ListResult. +var ExpectedResourcesSlice = []resources.Resource{FirstResource, SecondResource} + +// HandleListResourceSuccessfully creates an HTTP handler at `/resources` on the +// test handler mux that responds with a list of two resources. +func HandleListResourceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/resources", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListResult) + }) +} + +// HandleGetResourceSuccessfully creates an HTTP handler at `/resources` on the +// test handler mux that responds with a single resource. +func HandleGetResourceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/resources/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetResult) + }) +} + +// HandleCreateResourceSuccessfully creates an HTTP handler at `/resources` on the +// test handler mux that tests resource creation. 
+func HandleCreateResourceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/resources", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetResult) + }) +} + +// HandleDeleteResourceSuccessfully creates an HTTP handler at `/resources` on the +// test handler mux that tests resource deletion. +func HandleDeleteResourceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/resources/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateResourceSuccessfully creates an HTTP handler at `/resources` on the +// test handler mux that tests resource update. +func HandleUpdateResourceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/resources/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateResult) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/testing/requests_test.go new file mode 100644 index 000000000000..22f97a2269e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/testing/requests_test.go @@ -0,0 +1,89 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/service/vN/resources" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListResourcesSuccessfully(t) + + count := 0 + err := resources.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := resources.ExtractResources(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedResourcesSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestListResourcesAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListResourcesSuccessfully(t) + + allPages, err := resources.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := resources.ExtractResources(allPages) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedResourcesSlice, actual) +} + +func TestGetResource(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetResourceSuccessfully(t) + + actual, err := resources.Get(client.ServiceClient(), "9fe1d3").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, SecondResource, *actual) +} + +func TestCreateResource(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateResourceSuccessfully(t) + + createOpts := resources.CreateOpts{ + Name: "resource two", + } + + actual, err := resources.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, SecondResource, *actual) +} + +func TestDeleteResource(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteResourceSuccessfully(t) + + res := 
resources.Delete(client.ServiceClient(), "9fe1d3") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateResource(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateResourceSuccessfully(t) + + updateOpts := resources.UpdateOpts{ + Description: "Staging Resource", + } + + actual, err := resources.Update(client.ServiceClient(), "9fe1d3", updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, SecondResourceUpdated, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/urls.go b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/urls.go new file mode 100644 index 000000000000..603b8124c911 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/.template/urls.go @@ -0,0 +1,23 @@ +package RESOURCE + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("resource") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("resource", id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("resource") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("resource", id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("resource", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/README.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/README.md new file mode 100644 index 000000000000..14950b2bd6a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/README.md @@ -0,0 +1,12 @@ +Contributor Tutorial +==================== + +This tutorial is to help new contributors become familiar with the processes +used by the Gophercloud team when adding a new feature or fixing a bug. + +While we have a defined process for working on Gophercloud, we're very mindful +that everyone is new to this in the beginning. Please reach out for help or ask +for clarification if needed. No question is ever "dumb" or not worth our time +answering. + +To begin, go to [Step 1](step-01-introduction.md). diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-01-introduction.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-01-introduction.md new file mode 100644 index 000000000000..d806143d774e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-01-introduction.md @@ -0,0 +1,16 @@ +Step 1: Read Our Guides +======================== + +There are two introductory guides you should read before proceeding: + +* [CONTRIBUTING](/.github/CONTRIBUTING.md): The Contributing guide is a detailed + document which describes the different ways you can contribute to Gophercloud + and how to get started. This tutorial you're reading is very similar to that + guide, but presented in a different way. We still recommend you read it over. + +* [STYLE](/docs/STYLEGUIDE.md): The Style Guide documents coding conventions used + in the Gophercloud project. + +--- + +When you've finished reading those guides, proceed to [Step 2](step-02-issues.md). 
diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-02-issues.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-02-issues.md
new file mode 100644
index 000000000000..a3ae2a237b54
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-02-issues.md
@@ -0,0 +1,124 @@
+Step 2: Create an Issue
+========================
+
+Every patch / Pull Request requires a corresponding issue. If you're fixing
+a bug for an existing issue, then there's no need to create a new issue.
+
+However, if no prior issue exists, you must create an issue.
+
+Reporting a Bug
+---------------
+
+When reporting a bug, please try to provide as much information as you
+can.
+
+The following issues are good examples for reporting a bug:
+
+* https://github.com/gophercloud/gophercloud/issues/108
+* https://github.com/gophercloud/gophercloud/issues/212
+* https://github.com/gophercloud/gophercloud/issues/424
+* https://github.com/gophercloud/gophercloud/issues/588
+* https://github.com/gophercloud/gophercloud/issues/629
+* https://github.com/gophercloud/gophercloud/issues/647
+
+Feature Request
+---------------
+
+If you've noticed that a feature is missing from Gophercloud, you'll also
+need to create an issue before doing any work. This is to start a discussion about
+whether or not the feature should be included in Gophercloud. We don't want
+to see you put in hours of work only to learn that the feature is out of
+scope of the project.
+
+Feature requests can come in different forms:
+
+### Adding a Feature to Gophercloud Core
+
+The "core" of Gophercloud is the code which supports API requests and
+responses: pagination, error handling, building request bodies, and parsing
+response bodies are all examples of core code.
+
+Modifications to core will usually generate more discussion than
+other requests since a change to core will affect _all_ of Gophercloud.
+
+The following issues are examples of core change discussions:
+
+* https://github.com/gophercloud/gophercloud/issues/310
+* https://github.com/gophercloud/gophercloud/issues/613
+* https://github.com/gophercloud/gophercloud/issues/729
+* https://github.com/gophercloud/gophercloud/issues/713
+
+### Adding a Missing Field
+
+If you've found a missing field in an existing struct, submit an issue to
+request having it added. These kinds of issues are pretty easy to report
+and resolve.
+
+You should also provide a link to the actual service's Python code which
+defines the missing field.
+
+The following issues are examples of missing fields:
+
+* https://github.com/gophercloud/gophercloud/issues/620
+* https://github.com/gophercloud/gophercloud/issues/621
+* https://github.com/gophercloud/gophercloud/issues/658
+
+There's one situation which can make adding fields more difficult: if the field
+is part of an API extension rather than the base API itself. An example of this
+can be seen in [this](https://github.com/gophercloud/gophercloud/issues/749)
+issue.
+
+Here, a user reported fields missing in the `Get` function of
+`networking/v2/networks`. The fields reported missing weren't missing at all;
+they're just part of various Networking extensions located in
+`networking/v2/extensions`.
+
+### Adding a Missing API Call
+
+If you've found a missing API action, create an issue with details of
+the action.
For example: + +* https://github.com/gophercloud/gophercloud/issues/715 +* https://github.com/gophercloud/gophercloud/issues/719 + +You'll want to make sure the API call is part of the upstream OpenStack project +and not an extension created by a third-party or vendor. Gophercloud only +supports the OpenStack projects proper. + +### Adding a Missing API Suite + +Adding support to a missing suite of API calls will require more than one Pull +Request. However, you can use a single issue for all PRs. + +Examples of issues which track the addition of a missing API suite are: + +* https://github.com/gophercloud/gophercloud/issues/539 +* https://github.com/gophercloud/gophercloud/issues/555 +* https://github.com/gophercloud/gophercloud/issues/571 +* https://github.com/gophercloud/gophercloud/issues/583 +* https://github.com/gophercloud/gophercloud/issues/605 + +Note how the issue breaks down the implementation by request types (Create, +Update, Delete, Get, List). + +Also note how these issues provide links to the service's Python code. These +links are not required for _issues_, but it's usually a good idea to provide +them, anyway. These links _are required_ for PRs and that will be covered in +detail in a later step of this tutorial. + +### Adding a Missing OpenStack Project + +These kinds of feature additions are large undertakings. Adding support for +an entire OpenStack project is something the Gophercloud team very much +appreciates, but you should be prepared for several weeks of work and +interaction with the Gophercloud team. + +An example of how to create an issue for an entire project can be seen +here: + +* https://github.com/gophercloud/gophercloud/issues/723 + +--- + +With all of the above in mind, proceed to [Step 3](step-03-code-hunting.md) to +learn about Code Hunting. diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-03-code-hunting.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-03-code-hunting.md new file mode 100644 index 000000000000..f773eec040fc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-03-code-hunting.md @@ -0,0 +1,104 @@ +Step 3: Code Hunting +==================== + +If you plan to submit a feature or bug fix to Gophercloud, you must be +able to prove your code correctly works with the OpenStack service in +question. + +Let's use the following issue as an example: +[https://github.com/gophercloud/gophercloud/issues/621](https://github.com/gophercloud/gophercloud/issues/621). +In this issue, there's a request being made to add support for +`availability_zone_hints` to the `networking/v2/networks` package. +Meaning, we want to change: + +```go +type Network struct { + ID string `json:"id"` + Name string `json:"name"` + AdminStateUp bool `json:"admin_state_up"` + Status string `json:"status"` + Subnets []string `json:"subnets"` + TenantID string `json:"tenant_id"` + Shared bool `json:"shared"` +} +``` + +to look like + +```go +type Network struct { + ID string `json:"id"` + Name string `json:"name"` + AdminStateUp bool `json:"admin_state_up"` + Status string `json:"status"` + Subnets []string `json:"subnets"` + TenantID string `json:"tenant_id"` + Shared bool `json:"shared"` + + AvailabilityZoneHints []string `json:"availability_zone_hints"` +} +``` + +We need to be sure that `availability_zone_hints` is a field which really does +exist in the OpenStack Neutron project and it's not a field which was added as +a customization to a single OpenStack cloud. 
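+
+One quick, throwaway way to confirm what a real cloud returns for this field
+(this snippet is not part of the eventual patch; it assumes you already have an
+authenticated `networkClient` for the Networking v2 service, the ID of an
+existing network, and the usual `encoding/json`/`fmt` imports) is to dump the
+raw response body:
+
+```go
+res := networks.Get(networkClient, "<existing-network-id>")
+if res.Err != nil {
+	panic(res.Err)
+}
+// res.Body holds the decoded JSON of the GET response; printing it shows
+// whether "availability_zone_hints" is present and what shape it has.
+raw, err := json.MarshalIndent(res.Body, "", "  ")
+if err != nil {
+	panic(err)
+}
+fmt.Println(string(raw))
+```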
+ +In addition, we need to ensure that `availability_zone_hints` is really a +`[]string` and not a different kind of type. + +One way of verifying this is through the [OpenStack API reference +documentation](https://developer.openstack.org/api-ref/network/v2/). +However, the API docs might either be incorrect or they might not provide all of +the details we need to know in order to ensure this field is added correctly. + +> Note: when we say the API docs might be incorrect, we are _not_ implying +> that the API docs aren't useful or that the contributors who work on the API +> docs are wrong. OpenStack moves fast. Typos happen. Forgetting to update +> documentation happens. + +Since the OpenStack service itself correctly accepts and processes the fields, +the best source of information on how the field works is in the service code +itself. + +Continuing on with using #621 as an example, we can find the definition of +`availability_zone_hints` in the following piece of code: + +https://github.com/openstack/neutron/blob/8e9959725eda4063a318b4ba6af1e3494cad9e35/neutron/objects/network.py#L191 + +The above code confirms that `availability_zone_hints` is indeed part of the +`Network` object and that its type is a list of strings (`[]string`). + +This example is a best-case situation: the code is relatively easy to find +and it's simple to understand. However, there will be times when proving the +implementation in the service code is difficult. Make no mistake, this is _not_ +fun work. This can sometimes be more difficult than writing the actual patch +for Gophercloud. However, this is an essential step to ensuring the feature +or bug fix is correctly added to Gophercloud. + +Examples of good code hunting can be seen here: + +* https://github.com/gophercloud/gophercloud/issues/539 +* https://github.com/gophercloud/gophercloud/issues/555 +* https://github.com/gophercloud/gophercloud/issues/571 +* https://github.com/gophercloud/gophercloud/issues/583 +* https://github.com/gophercloud/gophercloud/issues/605 + +Code Hunting Tips +----------------- + +OpenStack projects differ from one to another. Code is organized in different +ways. However, the following tips should be useful across all projects. + +* The logic which implements Create and Delete actions is usually either located + in the "model" or "controller" portion of the code. + +* Use Github's search box to search for the exact field you're working on. + Review all results to gain a good understanding of everywhere the field is + used. + +* When adding a field, look for an object model or a schema of some sort. + +--- + +Proceed to [Step 4](step-04-acceptance-testing.md) to learn about Acceptance +Testing. diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-04-acceptance-testing.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-04-acceptance-testing.md new file mode 100644 index 000000000000..fe8271743960 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-04-acceptance-testing.md @@ -0,0 +1,27 @@ +Step 4: Acceptance Testing +========================== + +If we haven't started working on the feature or bug fix, why are we talking +about Acceptance Testing now? + +Before you implement a feature or bug fix, you _must_ be able to test your code +in a working OpenStack environment. Please do not submit code which you have +only tested with offline unit tests. + +Blindly submitting code is dangerous to the Gophercloud project. 
Developers
+from all over the world use Gophercloud in many different projects. If you
+submit code which is untested, it can cause these projects to break or become
+unstable.
+
+And, to be frank, submitting untested code will inevitably cause someone else
+to have to spend time fixing it.
+
+If you don't have an OpenStack environment to test with, we have lots of
+documentation [here](/acceptance) to help you build your own small OpenStack
+environment for testing.
+
+---
+
+Once you've confirmed you are able to test your code, proceed to
+[Step 5](step-05-pull-requests.md) to (finally!) start working on a Pull
+Request.
diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-05-pull-requests.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-05-pull-requests.md
new file mode 100644
index 000000000000..cb340a92cac7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-05-pull-requests.md
@@ -0,0 +1,189 @@
+Step 5: Writing the Code
+========================
+
+At this point, you should have:
+
+- [x] Identified a feature or bug fix
+- [x] Opened an Issue about it
+- [x] Located the project's service code which validates the feature or fix
+- [x] Obtained an OpenStack environment to test with
+
+Now it's time to write the actual code! We recommend reading over the
+[CONTRIBUTING](/.github/CONTRIBUTING.md) guide again as a refresher. Notably,
+the [Getting Started](/.github/CONTRIBUTING.md#getting-started) section will
+help you set up a `git` repository correctly.
+
+We encourage you to browse the existing Gophercloud code to find examples
+of similar implementations. It would be a _very_ rare occurrence for you
+to be implementing something that hasn't already been done.
+
+Use the existing packages as templates and mirror the style, naming, and
+logic.
+
+Types of Pull Requests
+----------------------
+
+The number of changes you plan to make will determine how much code you should
+submit as Pull Requests.
+
+### A Single Bug Fix
+
+If you're implementing a single bug fix, then creating one `git` branch and
+submitting one Pull Request is fine.
+
+### Adding a Single Field
+
+If you're adding a single field, then a single Pull Request is also fine. See
+[#662](https://github.com/gophercloud/gophercloud/pull/662) as an example of
+this.
+
+If you plan to add more than one missing field, you will need to open a Pull
+Request for _each_ field.
+
+### Adding a Single API Call
+
+Single API calls can also be submitted as a single Pull Request. See
+[#722](https://github.com/gophercloud/gophercloud/pull/722) as an example of
+this.
+
+### Adding a Suite of API Calls
+
+If you're adding support for a "suite" of API calls (meaning: Create, Update,
+Delete, Get), then you will need to create one Pull Request for _each_ call.
+
+The following Pull Requests are good examples of how to do this:
+
+* https://github.com/gophercloud/gophercloud/pull/584
+* https://github.com/gophercloud/gophercloud/pull/586
+* https://github.com/gophercloud/gophercloud/pull/587
+* https://github.com/gophercloud/gophercloud/pull/594
+
+You can also use the provided [template](/docs/contributor-tutorial/.template)
+as it contains a lot of the repeated boilerplate code seen in each resource.
+However, please make sure to thoroughly review and edit it as needed.
+Leaving templated portions in place might be interpreted as rushing through
+the work and will require further rounds of review to fix.
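+
+For instance, a lightly adapted copy of the template's `urls.go` for a
+hypothetical identity v3 `regions` resource might start out like this (the
+package name and URL path below are illustrative only):
+
+```go
+package regions
+
+import "github.com/gophercloud/gophercloud"
+
+// listURL and getURL simply delegate to ServiceURL, which joins the
+// client's endpoint with the given path segments.
+func listURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL("regions")
+}
+
+func getURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("regions", id)
+}
+```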
+ +### Adding an Entire OpenStack Project + +To add an entire OpenStack project, you must break each set of API calls into +individual Pull Requests. Implementing an entire project can be thought of as +implementing multiple API suites. + +An example of this can be seen from the Pull Requests referenced in +[#723](https://github.com/gophercloud/gophercloud/issues/723). + +What to Include in a Pull Request +--------------------------------- + +Each Pull Request should contain the following: + +1. The actual Go code to implement the feature or bug fix +2. Unit tests +3. Acceptance tests +4. Documentation + +Whether you want to bundle all of the above into a single commit or multiple +commits is up to you. Use your preferred style. + +### Unit Tests + +Unit tests should provide basic validation that your code works as intended. + +Please do not use JSON fixtures from the API reference documentation. Please +generate your own fixtures using the OpenStack environment you're +[testing](step-04-acceptance-testing.md) with. + +### Acceptance Tests + +Since unit tests are not run against an actual OpenStack environment, +acceptance tests can arguably be more important. The acceptance tests that you +include in your Pull Request should confirm that your implemented code works +as intended with an actual OpenStack environment. + +### Documentation + +All documentation in Gophercloud is done through in-line `godoc`. Please make +sure to document all fields, functions, and methods appropriately. In addition, +each package has a `doc.go` file which should be created or amended with +details of your Pull Request, where appropriate. + +Dealing with Related Pull Requests +---------------------------------- + +If you plan to open more than one Pull Request, it's only natural that code +from one Pull Request will be dependent on code from the prior Pull Request. + +There are two methods of handling this: + +### Create Independent Pull Requests + +With this method, each Pull Request has all of the code to fully implement +the code in question. Each Pull Request can be merged in any order because +it's self contained. + +Use the following `git` workflow to implement this method: + +```shell +$ git checkout master +$ git pull +$ git checkout -b identityv3-regions-create +$ (write your code) +$ git add . +$ git commit -m "Implementing Regions Create" + +$ git checkout master +$ git checkout -b identityv3-regions-update +$ (write your code) +$ git add . +$ git commit -m "Implementing Regions Update" +``` + +Advantages of this Method: + +* Pull Requests can be merged in any order +* Additional commits to one Pull Request are independent of other Pull Requests + +Disadvantages of this Method: + +* There will be _a lot_ of duplicate code in each Pull Request +* You will have to rebase all other Pull Requests and resolve a good amount of + merge conflicts. + +### Create a Chain of Pull Requests + +With this method, each Pull Request is based off of a previous Pull Request. +Pull Requests will have to be merged in a specific order since there is a +defined relationship. + +Use the following `git` workflow to implement this method: + +```shell +$ git checkout master +$ git pull +$ git checkout -b identityv3-regions-create +$ (write your code) +$ git add . +$ git commit -m "Implementing Regions Create" + +$ git checkout -b identityv3-regions-update +$ (write your code) +$ git add . 
+$ git commit -m "Implementing Regions Update"
+```
+
+Advantages of this Method:
+
+* Each Pull Request becomes smaller since you are building off of the last
+
+Disadvantages of this Method:
+
+* If a Pull Request requires changes, you will have to rebase _all_ child
+  Pull Requests based off of the parent.
+
+The choice of method is up to you.
+
+---
+
+Once you have your code written, submit a Pull Request to Gophercloud and
+proceed to [Step 6](step-06-code-review.md).
diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-06-code-review.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-06-code-review.md
new file mode 100644
index 000000000000..ac3b68808bda
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-06-code-review.md
@@ -0,0 +1,93 @@
+Step 6: Code Review
+===================
+
+Once you've submitted a Pull Request, three things will happen automatically:
+
+1. Travis-CI will run a set of simple tests:
+
+   a. Unit Tests
+
+   b. Code Formatting checks
+
+   c. `go vet` checks
+
+2. Coveralls will run a coverage test.
+3. [OpenLab](https://openlabtesting.org/) will run acceptance tests.
+
+Depending on the results of the above, you might need to make additional
+changes to your code.
+
+While you're working on the finishing touches to your code, it is helpful
+to add a `[wip]` tag to the title of your Pull Request.
+
+You are most welcome to take as much time as you need to work on your Pull
+Request. Also, take advantage of the automatic testing that is done to
+each commit.
+
+### Travis-CI
+
+If Travis reports code formatting issues, please make sure to run `gofmt` on all
+of your code. Travis will also report errors with unit tests, so you should
+ensure those are fixed, too.
+
+### Coveralls
+
+If Coveralls reports a decrease in test coverage, check and make sure you have
+provided unit tests. A decrease in test coverage is _sometimes_ unavoidable and
+ignorable.
+
+### OpenLab
+
+OpenLab does not yet run a full suite of acceptance tests, so it's possible
+that the acceptance tests you've included were not run. When this happens,
+a core member of Gophercloud will run the tests manually.
+
+There are times when a core reviewer does not have access to the resources
+required to run the acceptance tests. When this happens, it is essential
+that you've run them yourself (see [Step 4](step-04-acceptance-testing.md)).
+
+Request a Code Review
+---------------------
+
+When you feel your Pull Request is ready for review, please leave a comment
+requesting a code review. If you don't explicitly ask for a code review, a
+core member might not know the Pull Request is ready for review.
+
+Additionally, if there are parts of your implementation that you are unsure
+about, please ask for help. We're more than happy to provide advice.
+
+During the code review process, a core member will review the code you've
+submitted and either request changes or request additional information.
+Generally, these requests fall under the following categories:
+
+1. Code which needs to be reformatted (see our [Style Guide](/docs/STYLEGUIDE.md)
+   for the conventions used).
+
+2. Requests for additional information about the validity of something. This
+   might happen because the included supporting service code URLs don't have
+   enough information.
+
+3. Missing unit tests or acceptance tests.
+
+Submitting Changes
+------------------
+
+If a code review requires changes to be submitted, please do not squash your
+commits.
Please only add new commits to the Pull Request. This is to help the +code reviewer see only the changes that were made. + +It's Never Personal +------------------- + +Code review is a healthy exercise where a new set of eyes can sometimes spot +items forgotten by the author. + +Please don't take change requests personally. Our intention is to ensure the +code is correct before merging. + +--- + +Once the code has been reviewed and approved, a core member will merge your +Pull Request. + +Please proceed to [Step 7](step-07-congratulations.md). diff --git a/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-07-congratulations.md b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-07-congratulations.md new file mode 100644 index 000000000000..e14b794143d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/docs/contributor-tutorial/step-07-congratulations.md @@ -0,0 +1,9 @@ +Step 7: Congratulations! +======================== + +At this point your code is merged and you've either fixed a bug or added a new +feature to Gophercloud! + +We completely understand that this has been a long process. We appreciate your +patience as well as the time you have taken for working on this. You've made +Gophercloud a better project with your work. diff --git a/vendor/github.com/gophercloud/gophercloud/endpoint_search.go b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go new file mode 100644 index 000000000000..2fbc3c97f14f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/endpoint_search.go @@ -0,0 +1,76 @@ +package gophercloud + +// Availability indicates to whom a specific service endpoint is accessible: +// the internet at large, internal networks only, or only to administrators. +// Different identity services use different terminology for these. Identity v2 +// lists them as different kinds of URLs within the service catalog ("adminURL", +// "internalURL", and "publicURL"), while v3 lists them as "Interfaces" in an +// endpoint's response. +type Availability string + +const ( + // AvailabilityAdmin indicates that an endpoint is only available to + // administrators. + AvailabilityAdmin Availability = "admin" + + // AvailabilityPublic indicates that an endpoint is available to everyone on + // the internet. + AvailabilityPublic Availability = "public" + + // AvailabilityInternal indicates that an endpoint is only available within + // the cluster's internal network. + AvailabilityInternal Availability = "internal" +) + +// EndpointOpts specifies search criteria used by queries against an +// OpenStack service catalog. The options must contain enough information to +// unambiguously identify one, and only one, endpoint within the catalog. +// +// Usually, these are passed to service client factory functions in a provider +// package, like "openstack.NewComputeV2()". +type EndpointOpts struct { + // Type [required] is the service type for the client (e.g., "compute", + // "object-store"). Generally, this will be supplied by the service client + // function, but a user-given value will be honored if provided. + Type string + + // Name [optional] is the service name for the client (e.g., "nova") as it + // appears in the service catalog. Services can have the same Type but a + // different Name, which is why both Type and Name are sometimes needed. + Name string + + // Region [required] is the geographic region in which the endpoint resides, + // generally specifying which datacenter should house your resources. 
+ // Required only for services that span multiple regions. + Region string + + // Availability [optional] is the visibility of the endpoint to be returned. + // Valid types include the constants AvailabilityPublic, AvailabilityInternal, + // or AvailabilityAdmin from this package. + // + // Availability is not required, and defaults to AvailabilityPublic. Not all + // providers or services offer all Availability options. + Availability Availability +} + +/* +EndpointLocator is an internal function to be used by provider implementations. + +It provides an implementation that locates a single endpoint from a service +catalog for a specific ProviderClient based on user-provided EndpointOpts. The +provider then uses it to discover related ServiceClients. +*/ +type EndpointLocator func(EndpointOpts) (string, error) + +// ApplyDefaults is an internal method to be used by provider implementations. +// +// It sets EndpointOpts fields if not already set, including a default type. +// Currently, EndpointOpts.Availability defaults to the public endpoint. +func (eo *EndpointOpts) ApplyDefaults(t string) { + if eo.Type == "" { + eo.Type = t + } + if eo.Availability == "" { + eo.Availability = AvailabilityPublic + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/errors.go b/vendor/github.com/gophercloud/gophercloud/errors.go new file mode 100644 index 000000000000..a5fa68d6d55f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/errors.go @@ -0,0 +1,453 @@ +package gophercloud + +import ( + "fmt" + "strings" +) + +// BaseError is an error type that all other error types embed. +type BaseError struct { + DefaultErrString string + Info string +} + +func (e BaseError) Error() string { + e.DefaultErrString = "An error occurred while executing a Gophercloud request." + return e.choseErrString() +} + +func (e BaseError) choseErrString() string { + if e.Info != "" { + return e.Info + } + return e.DefaultErrString +} + +// ErrMissingInput is the error when input is required in a particular +// situation but not provided by the user +type ErrMissingInput struct { + BaseError + Argument string +} + +func (e ErrMissingInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing input for argument [%s]", e.Argument) + return e.choseErrString() +} + +// ErrInvalidInput is an error type used for most non-HTTP Gophercloud errors. 
+type ErrInvalidInput struct { + ErrMissingInput + Value interface{} +} + +func (e ErrInvalidInput) Error() string { + e.DefaultErrString = fmt.Sprintf("Invalid input provided for argument [%s]: [%+v]", e.Argument, e.Value) + return e.choseErrString() +} + +// ErrMissingEnvironmentVariable is the error when environment variable is required +// in a particular situation but not provided by the user +type ErrMissingEnvironmentVariable struct { + BaseError + EnvironmentVariable string +} + +func (e ErrMissingEnvironmentVariable) Error() string { + e.DefaultErrString = fmt.Sprintf("Missing environment variable [%s]", e.EnvironmentVariable) + return e.choseErrString() +} + +// ErrMissingAnyoneOfEnvironmentVariables is the error when anyone of the environment variables +// is required in a particular situation but not provided by the user +type ErrMissingAnyoneOfEnvironmentVariables struct { + BaseError + EnvironmentVariables []string +} + +func (e ErrMissingAnyoneOfEnvironmentVariables) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Missing one of the following environment variables [%s]", + strings.Join(e.EnvironmentVariables, ", "), + ) + return e.choseErrString() +} + +// ErrUnexpectedResponseCode is returned by the Request method when a response code other than +// those listed in OkCodes is encountered. +type ErrUnexpectedResponseCode struct { + BaseError + URL string + Method string + Expected []int + Actual int + Body []byte +} + +func (e ErrUnexpectedResponseCode) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Expected HTTP response code %v when accessing [%s %s], but got %d instead\n%s", + e.Expected, e.Method, e.URL, e.Actual, e.Body, + ) + return e.choseErrString() +} + +// ErrDefault400 is the default error type returned on a 400 HTTP response code. +type ErrDefault400 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault401 is the default error type returned on a 401 HTTP response code. +type ErrDefault401 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault403 is the default error type returned on a 403 HTTP response code. +type ErrDefault403 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault404 is the default error type returned on a 404 HTTP response code. +type ErrDefault404 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault405 is the default error type returned on a 405 HTTP response code. +type ErrDefault405 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault408 is the default error type returned on a 408 HTTP response code. +type ErrDefault408 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault429 is the default error type returned on a 429 HTTP response code. +type ErrDefault429 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault500 is the default error type returned on a 500 HTTP response code. +type ErrDefault500 struct { + ErrUnexpectedResponseCode +} + +// ErrDefault503 is the default error type returned on a 503 HTTP response code. 
+type ErrDefault503 struct { + ErrUnexpectedResponseCode +} + +func (e ErrDefault400) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Bad request with: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() +} +func (e ErrDefault401) Error() string { + return "Authentication failed" +} +func (e ErrDefault403) Error() string { + e.DefaultErrString = fmt.Sprintf( + "Request forbidden: [%s %s], error message: %s", + e.Method, e.URL, e.Body, + ) + return e.choseErrString() +} +func (e ErrDefault404) Error() string { + return "Resource not found" +} +func (e ErrDefault405) Error() string { + return "Method not allowed" +} +func (e ErrDefault408) Error() string { + return "The server timed out waiting for the request" +} +func (e ErrDefault429) Error() string { + return "Too many requests have been sent in a given amount of time. Pause" + + " requests, wait up to one minute, and try again." +} +func (e ErrDefault500) Error() string { + return "Internal Server Error" +} +func (e ErrDefault503) Error() string { + return "The service is currently unable to handle the request due to a temporary" + + " overloading or maintenance. This is a temporary condition. Try again later." +} + +// Err400er is the interface resource error types implement to override the error message +// from a 400 error. +type Err400er interface { + Error400(ErrUnexpectedResponseCode) error +} + +// Err401er is the interface resource error types implement to override the error message +// from a 401 error. +type Err401er interface { + Error401(ErrUnexpectedResponseCode) error +} + +// Err403er is the interface resource error types implement to override the error message +// from a 403 error. +type Err403er interface { + Error403(ErrUnexpectedResponseCode) error +} + +// Err404er is the interface resource error types implement to override the error message +// from a 404 error. +type Err404er interface { + Error404(ErrUnexpectedResponseCode) error +} + +// Err405er is the interface resource error types implement to override the error message +// from a 405 error. +type Err405er interface { + Error405(ErrUnexpectedResponseCode) error +} + +// Err408er is the interface resource error types implement to override the error message +// from a 408 error. +type Err408er interface { + Error408(ErrUnexpectedResponseCode) error +} + +// Err429er is the interface resource error types implement to override the error message +// from a 429 error. +type Err429er interface { + Error429(ErrUnexpectedResponseCode) error +} + +// Err500er is the interface resource error types implement to override the error message +// from a 500 error. +type Err500er interface { + Error500(ErrUnexpectedResponseCode) error +} + +// Err503er is the interface resource error types implement to override the error message +// from a 503 error. +type Err503er interface { + Error503(ErrUnexpectedResponseCode) error +} + +// ErrTimeOut is the error type returned when an operations times out. +type ErrTimeOut struct { + BaseError +} + +func (e ErrTimeOut) Error() string { + e.DefaultErrString = "A time out occurred" + return e.choseErrString() +} + +// ErrUnableToReauthenticate is the error type returned when reauthentication fails. 
+type ErrUnableToReauthenticate struct { + BaseError + ErrOriginal error +} + +func (e ErrUnableToReauthenticate) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to re-authenticate: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrErrorAfterReauthentication is the error type returned when reauthentication +// succeeds, but an error occurs afterword (usually an HTTP error). +type ErrErrorAfterReauthentication struct { + BaseError + ErrOriginal error +} + +func (e ErrErrorAfterReauthentication) Error() string { + e.DefaultErrString = fmt.Sprintf("Successfully re-authenticated, but got error executing request: %s", e.ErrOriginal) + return e.choseErrString() +} + +// ErrServiceNotFound is returned when no service in a service catalog matches +// the provided EndpointOpts. This is generally returned by provider service +// factory methods like "NewComputeV2()" and can mean that a service is not +// enabled for your account. +type ErrServiceNotFound struct { + BaseError +} + +func (e ErrServiceNotFound) Error() string { + e.DefaultErrString = "No suitable service could be found in the service catalog." + return e.choseErrString() +} + +// ErrEndpointNotFound is returned when no available endpoints match the +// provided EndpointOpts. This is also generally returned by provider service +// factory methods, and usually indicates that a region was specified +// incorrectly. +type ErrEndpointNotFound struct { + BaseError +} + +func (e ErrEndpointNotFound) Error() string { + e.DefaultErrString = "No suitable endpoint could be found in the service catalog." + return e.choseErrString() +} + +// ErrResourceNotFound is the error when trying to retrieve a resource's +// ID by name and the resource doesn't exist. +type ErrResourceNotFound struct { + BaseError + Name string + ResourceType string +} + +func (e ErrResourceNotFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Unable to find %s with name %s", e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrMultipleResourcesFound is the error when trying to retrieve a resource's +// ID by name and multiple resources have the user-provided name. +type ErrMultipleResourcesFound struct { + BaseError + Name string + Count int + ResourceType string +} + +func (e ErrMultipleResourcesFound) Error() string { + e.DefaultErrString = fmt.Sprintf("Found %d %ss matching %s", e.Count, e.ResourceType, e.Name) + return e.choseErrString() +} + +// ErrUnexpectedType is the error when an unexpected type is encountered +type ErrUnexpectedType struct { + BaseError + Expected string + Actual string +} + +func (e ErrUnexpectedType) Error() string { + e.DefaultErrString = fmt.Sprintf("Expected %s but got %s", e.Expected, e.Actual) + return e.choseErrString() +} + +func unacceptedAttributeErr(attribute string) string { + return fmt.Sprintf("The base Identity V3 API does not accept authentication by %s", attribute) +} + +func redundantWithTokenErr(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a TokenID", attribute) +} + +func redundantWithUserID(attribute string) string { + return fmt.Sprintf("%s may not be provided when authenticating with a UserID", attribute) +} + +// ErrAPIKeyProvided indicates that an APIKey was provided but can't be used. +type ErrAPIKeyProvided struct{ BaseError } + +func (e ErrAPIKeyProvided) Error() string { + return unacceptedAttributeErr("APIKey") +} + +// ErrTenantIDProvided indicates that a TenantID was provided but can't be used. 
+type ErrTenantIDProvided struct{ BaseError } + +func (e ErrTenantIDProvided) Error() string { + return unacceptedAttributeErr("TenantID") +} + +// ErrTenantNameProvided indicates that a TenantName was provided but can't be used. +type ErrTenantNameProvided struct{ BaseError } + +func (e ErrTenantNameProvided) Error() string { + return unacceptedAttributeErr("TenantName") +} + +// ErrUsernameWithToken indicates that a Username was provided, but token authentication is being used instead. +type ErrUsernameWithToken struct{ BaseError } + +func (e ErrUsernameWithToken) Error() string { + return redundantWithTokenErr("Username") +} + +// ErrUserIDWithToken indicates that a UserID was provided, but token authentication is being used instead. +type ErrUserIDWithToken struct{ BaseError } + +func (e ErrUserIDWithToken) Error() string { + return redundantWithTokenErr("UserID") +} + +// ErrDomainIDWithToken indicates that a DomainID was provided, but token authentication is being used instead. +type ErrDomainIDWithToken struct{ BaseError } + +func (e ErrDomainIDWithToken) Error() string { + return redundantWithTokenErr("DomainID") +} + +// ErrDomainNameWithToken indicates that a DomainName was provided, but token authentication is being used instead.s +type ErrDomainNameWithToken struct{ BaseError } + +func (e ErrDomainNameWithToken) Error() string { + return redundantWithTokenErr("DomainName") +} + +// ErrUsernameOrUserID indicates that neither username nor userID are specified, or both are at once. +type ErrUsernameOrUserID struct{ BaseError } + +func (e ErrUsernameOrUserID) Error() string { + return "Exactly one of Username and UserID must be provided for password authentication" +} + +// ErrDomainIDWithUserID indicates that a DomainID was provided, but unnecessary because a UserID is being used. +type ErrDomainIDWithUserID struct{ BaseError } + +func (e ErrDomainIDWithUserID) Error() string { + return redundantWithUserID("DomainID") +} + +// ErrDomainNameWithUserID indicates that a DomainName was provided, but unnecessary because a UserID is being used. +type ErrDomainNameWithUserID struct{ BaseError } + +func (e ErrDomainNameWithUserID) Error() string { + return redundantWithUserID("DomainName") +} + +// ErrDomainIDOrDomainName indicates that a username was provided, but no domain to scope it. +// It may also indicate that both a DomainID and a DomainName were provided at once. +type ErrDomainIDOrDomainName struct{ BaseError } + +func (e ErrDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName to authenticate by Username" +} + +// ErrMissingPassword indicates that no password was provided and no token is available. +type ErrMissingPassword struct{ BaseError } + +func (e ErrMissingPassword) Error() string { + return "You must provide a password to authenticate" +} + +// ErrScopeDomainIDOrDomainName indicates that a domain ID or Name was required in a Scope, but not present. +type ErrScopeDomainIDOrDomainName struct{ BaseError } + +func (e ErrScopeDomainIDOrDomainName) Error() string { + return "You must provide exactly one of DomainID or DomainName in a Scope with ProjectName" +} + +// ErrScopeProjectIDOrProjectName indicates that both a ProjectID and a ProjectName were provided in a Scope. 
+type ErrScopeProjectIDOrProjectName struct{ BaseError } + +func (e ErrScopeProjectIDOrProjectName) Error() string { + return "You must provide at most one of ProjectID or ProjectName in a Scope" +} + +// ErrScopeProjectIDAlone indicates that a ProjectID was provided with other constraints in a Scope. +type ErrScopeProjectIDAlone struct{ BaseError } + +func (e ErrScopeProjectIDAlone) Error() string { + return "ProjectID must be supplied alone in a Scope" +} + +// ErrScopeEmpty indicates that no credentials were provided in a Scope. +type ErrScopeEmpty struct{ BaseError } + +func (e ErrScopeEmpty) Error() string { + return "You must provide either a Project or Domain in a Scope" +} diff --git a/vendor/github.com/gophercloud/gophercloud/internal/pkg.go b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go new file mode 100644 index 000000000000..5bf0569ce8cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/internal/pkg.go @@ -0,0 +1 @@ +package internal diff --git a/vendor/github.com/gophercloud/gophercloud/internal/testing/pkg.go b/vendor/github.com/gophercloud/gophercloud/internal/testing/pkg.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/internal/testing/pkg.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/internal/testing/util_test.go b/vendor/github.com/gophercloud/gophercloud/internal/testing/util_test.go new file mode 100644 index 000000000000..12fd172b60b7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/internal/testing/util_test.go @@ -0,0 +1,42 @@ +package testing + +import ( + "reflect" + "testing" + + "github.com/gophercloud/gophercloud/internal" +) + +func TestRemainingKeys(t *testing.T) { + type User struct { + UserID string `json:"user_id"` + Username string `json:"username"` + Location string `json:"-"` + CreatedAt string `json:"-"` + Status string + IsAdmin bool + } + + userResponse := map[string]interface{}{ + "user_id": "abcd1234", + "username": "jdoe", + "location": "Hawaii", + "created_at": "2017-06-08T02:49:03.000000", + "status": "active", + "is_admin": "true", + "custom_field": "foo", + } + + expected := map[string]interface{}{ + "created_at": "2017-06-08T02:49:03.000000", + "is_admin": "true", + "custom_field": "foo", + } + + actual := internal.RemainingKeys(User{}, userResponse) + + isEqual := reflect.DeepEqual(expected, actual) + if !isEqual { + t.Fatalf("expected %s but got %s", expected, actual) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/internal/util.go b/vendor/github.com/gophercloud/gophercloud/internal/util.go new file mode 100644 index 000000000000..8efb283e7290 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/internal/util.go @@ -0,0 +1,34 @@ +package internal + +import ( + "reflect" + "strings" +) + +// RemainingKeys will inspect a struct and compare it to a map. Any struct +// field that does not have a JSON tag that matches a key in the map or +// a matching lower-case field in the map will be returned as an extra. +// +// This is useful for determining the extra fields returned in response bodies +// for resources that can contain an arbitrary or dynamic number of fields. 
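+//
+// For example (mirroring TestRemainingKeys in internal/testing): given a
+// struct whose fields carry `json:"user_id"` and `json:"username"` tags, a
+// response map containing "user_id", "username", and "custom_field" would
+// yield only "custom_field" as an extra, because it matches neither a JSON
+// tag nor a lower-cased field name.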
+func RemainingKeys(s interface{}, m map[string]interface{}) (extras map[string]interface{}) { + extras = make(map[string]interface{}) + for k, v := range m { + extras[k] = v + } + + valueOf := reflect.ValueOf(s) + typeOf := reflect.TypeOf(s) + for i := 0; i < valueOf.NumField(); i++ { + field := typeOf.Field(i) + + lowerField := strings.ToLower(field.Name) + delete(extras, lowerField) + + if tagValue := field.Tag.Get("json"); tagValue != "" && tagValue != "-" { + delete(extras, tagValue) + } + } + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go new file mode 100644 index 000000000000..994b5550c91e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/auth_env.go @@ -0,0 +1,85 @@ +package openstack + +import ( + "os" + + "github.com/gophercloud/gophercloud" +) + +var nilOptions = gophercloud.AuthOptions{} + +/* +AuthOptionsFromEnv fills out an identity.AuthOptions structure with the +settings found on the various OpenStack OS_* environment variables. + +The following variables provide sources of truth: OS_AUTH_URL, OS_USERNAME, +OS_PASSWORD, OS_TENANT_ID, and OS_TENANT_NAME. + +Of these, OS_USERNAME, OS_PASSWORD, and OS_AUTH_URL must have settings, +or an error will result. OS_TENANT_ID, OS_TENANT_NAME, OS_PROJECT_ID, and +OS_PROJECT_NAME are optional. + +OS_TENANT_ID and OS_TENANT_NAME are mutually exclusive to OS_PROJECT_ID and +OS_PROJECT_NAME. If OS_PROJECT_ID and OS_PROJECT_NAME are set, they will +still be referred as "tenant" in Gophercloud. + +To use this function, first set the OS_* environment variables (for example, +by sourcing an `openrc` file), then: + + opts, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(opts) +*/ +func AuthOptionsFromEnv() (gophercloud.AuthOptions, error) { + authURL := os.Getenv("OS_AUTH_URL") + username := os.Getenv("OS_USERNAME") + userID := os.Getenv("OS_USERID") + password := os.Getenv("OS_PASSWORD") + tenantID := os.Getenv("OS_TENANT_ID") + tenantName := os.Getenv("OS_TENANT_NAME") + domainID := os.Getenv("OS_DOMAIN_ID") + domainName := os.Getenv("OS_DOMAIN_NAME") + + // If OS_PROJECT_ID is set, overwrite tenantID with the value. + if v := os.Getenv("OS_PROJECT_ID"); v != "" { + tenantID = v + } + + // If OS_PROJECT_NAME is set, overwrite tenantName with the value. 
+ if v := os.Getenv("OS_PROJECT_NAME"); v != "" { + tenantName = v + } + + if authURL == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_AUTH_URL", + } + return nilOptions, err + } + + if username == "" && userID == "" { + err := gophercloud.ErrMissingAnyoneOfEnvironmentVariables{ + EnvironmentVariables: []string{"OS_USERNAME", "OS_USERID"}, + } + return nilOptions, err + } + + if password == "" { + err := gophercloud.ErrMissingEnvironmentVariable{ + EnvironmentVariable: "OS_PASSWORD", + } + return nilOptions, err + } + + ao := gophercloud.AuthOptions{ + IdentityEndpoint: authURL, + UserID: userID, + Username: username, + Password: password, + TenantID: tenantID, + TenantName: tenantName, + DomainID: domainID, + DomainName: domainName, + } + + return ao, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/doc.go new file mode 100644 index 000000000000..109f78f50672 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/doc.go @@ -0,0 +1,42 @@ +/* +Package quotasets enables retrieving and managing Block Storage quotas. + +Example to Get a Quota Set + + quotaset, err := quotasets.Get(blockStorageClient, "project-id").Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", quotaset) + +Example to Get Quota Set Usage + + quotaset, err := quotasets.GetUsage(blockStorageClient, "project-id").Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", quotaset) + +Example to Update a Quota Set + + updateOpts := quotasets.UpdateOpts{ + Volumes: gophercloud.IntToPointer(100), + } + + quotaset, err := quotasets.Update(blockStorageClient, "project-id", updateOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", quotaset) + +Example to Delete a Quota Set + + err := quotasets.Delete(blockStorageClient, "project-id").ExtractErr() + if err != nil { + panic(err) + } +*/ +package quotasets diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/requests.go new file mode 100644 index 000000000000..c89b4e3514ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/requests.go @@ -0,0 +1,94 @@ +package quotasets + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" +) + +// Get returns public data about a previously created QuotaSet. +func Get(client *gophercloud.ServiceClient, projectID string) (r GetResult) { + _, r.Err = client.Get(getURL(client, projectID), &r.Body, nil) + return +} + +// GetDefaults returns public data about the project's default block storage quotas. +func GetDefaults(client *gophercloud.ServiceClient, projectID string) (r GetResult) { + _, r.Err = client.Get(getDefaultsURL(client, projectID), &r.Body, nil) + return +} + +// GetUsage returns detailed public data about a previously created QuotaSet. +func GetUsage(client *gophercloud.ServiceClient, projectID string) (r GetUsageResult) { + u := fmt.Sprintf("%s?usage=true", getURL(client, projectID)) + _, r.Err = client.Get(u, &r.Body, nil) + return +} + +// Updates the quotas for the given projectID and returns the new QuotaSet. 
+func Update(client *gophercloud.ServiceClient, projectID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToBlockStorageQuotaUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	_, r.Err = client.Put(updateURL(client, projectID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return r
+}
+
+// UpdateOptsBuilder enables extensions to add parameters to the update request.
+type UpdateOptsBuilder interface {
+	// Extra specific name to prevent collisions with interfaces for other quotas
+	// (e.g. neutron)
+	ToBlockStorageQuotaUpdateMap() (map[string]interface{}, error)
+}
+
+// ToBlockStorageQuotaUpdateMap builds the update options into a serializable
+// format.
+func (opts UpdateOpts) ToBlockStorageQuotaUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "quota_set")
+}
+
+// UpdateOpts contains options for updating the quotas of a tenant.
+// All int values are pointers so they can be nil if they are not needed.
+// You can use gophercloud.IntToPointer() for convenience.
+type UpdateOpts struct {
+	// Volumes is the number of volumes that are allowed for each project.
+	Volumes *int `json:"volumes,omitempty"`
+
+	// Snapshots is the number of snapshots that are allowed for each project.
+	Snapshots *int `json:"snapshots,omitempty"`
+
+	// Gigabytes is the size (GB) of volumes and snapshots that are allowed for
+	// each project.
+	Gigabytes *int `json:"gigabytes,omitempty"`
+
+	// PerVolumeGigabytes is the size (GB) of volumes and snapshots that are
+	// allowed for each project and the specified volume type.
+	PerVolumeGigabytes *int `json:"per_volume_gigabytes,omitempty"`
+
+	// Backups is the number of backups that are allowed for each project.
+	Backups *int `json:"backups,omitempty"`
+
+	// BackupGigabytes is the size (GB) of backups that are allowed for each
+	// project.
+	BackupGigabytes *int `json:"backup_gigabytes,omitempty"`
+
+	// Groups is the number of groups that are allowed for each project.
+	Groups *int `json:"groups,omitempty"`
+
+	// Force will update the quotaset even if the quota has already been used
+	// and the reserved quota exceeds the new quota.
+	Force bool `json:"force,omitempty"`
+}
+
+// Delete resets the quotas for the given tenant to their default values.
+func Delete(client *gophercloud.ServiceClient, projectID string) (r DeleteResult) {
+	_, r.Err = client.Delete(updateURL(client, projectID), &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/results.go
new file mode 100644
index 000000000000..8ccfb5bfebca
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/results.go
@@ -0,0 +1,165 @@
+package quotasets
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// QuotaSet is a set of operational limits that allow for control of block
+// storage usage.
+type QuotaSet struct {
+	// ID is the project ID associated with this QuotaSet.
+	ID string `json:"id"`
+
+	// Volumes is the number of volumes that are allowed for each project.
+	Volumes int `json:"volumes"`
+
+	// Snapshots is the number of snapshots that are allowed for each project.
+	Snapshots int `json:"snapshots"`
+
+	// Gigabytes is the size (GB) of volumes and snapshots that are allowed for
+	// each project.
+ Gigabytes int `json:"gigabytes"` + + // PerVolumeGigabytes is the size (GB) of volumes and snapshots that are + // allowed for each project and the specifed volume type. + PerVolumeGigabytes int `json:"per_volume_gigabytes"` + + // Backups is the number of backups that are allowed for each project. + Backups int `json:"backups"` + + // BackupGigabytes is the size (GB) of backups that are allowed for each + // project. + BackupGigabytes int `json:"backup_gigabytes"` +} + +// QuotaUsageSet represents details of both operational limits of block +// storage resources and the current usage of those resources. +type QuotaUsageSet struct { + // ID is the project ID associated with this QuotaUsageSet. + ID string `json:"id"` + + // Volumes is the volume usage information for this project, including + // in_use, limit, reserved and allocated attributes. Note: allocated + // attribute is available only when nested quota is enabled. + Volumes QuotaUsage `json:"volumes"` + + // Snapshots is the snapshot usage information for this project, including + // in_use, limit, reserved and allocated attributes. Note: allocated + // attribute is available only when nested quota is enabled. + Snapshots QuotaUsage `json:"snapshots"` + + // Gigabytes is the size (GB) usage information of volumes and snapshots + // for this project, including in_use, limit, reserved and allocated + // attributes. Note: allocated attribute is available only when nested + // quota is enabled. + Gigabytes QuotaUsage `json:"gigabytes"` + + // PerVolumeGigabytes is the size (GB) usage information for each volume, + // including in_use, limit, reserved and allocated attributes. Note: + // allocated attribute is available only when nested quota is enabled and + // only limit is meaningful here. + PerVolumeGigabytes QuotaUsage `json:"per_volume_gigabytes"` + + // Backups is the backup usage information for this project, including + // in_use, limit, reserved and allocated attributes. Note: allocated + // attribute is available only when nested quota is enabled. + Backups QuotaUsage `json:"backups"` + + // BackupGigabytes is the size (GB) usage information of backup for this + // project, including in_use, limit, reserved and allocated attributes. + // Note: allocated attribute is available only when nested quota is + // enabled. + BackupGigabytes QuotaUsage `json:"backup_gigabytes"` +} + +// QuotaUsage is a set of details about a single operational limit that allows +// for control of block storage usage. +type QuotaUsage struct { + // InUse is the current number of provisioned resources of the given type. + InUse int `json:"in_use"` + + // Allocated is the current number of resources of a given type allocated + // for use. It is only available when nested quota is enabled. + Allocated int `json:"allocated"` + + // Reserved is a transitional state when a claim against quota has been made + // but the resource is not yet fully online. + Reserved int `json:"reserved"` + + // Limit is the maximum number of a given resource that can be + // allocated/provisioned. This is what "quota" usually refers to. + Limit int `json:"limit"` +} + +// QuotaSetPage stores a single page of all QuotaSet results from a List call. +type QuotaSetPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a QuotaSetsetPage is empty. +func (r QuotaSetPage) IsEmpty() (bool, error) { + ks, err := ExtractQuotaSets(r) + return len(ks) == 0, err +} + +// ExtractQuotaSets interprets a page of results as a slice of QuotaSets. 
+func ExtractQuotaSets(r pagination.Page) ([]QuotaSet, error) { + var s struct { + QuotaSets []QuotaSet `json:"quotas"` + } + err := (r.(QuotaSetPage)).ExtractInto(&s) + return s.QuotaSets, err +} + +type quotaResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any QuotaSet resource response +// as a QuotaSet struct. +func (r quotaResult) Extract() (*QuotaSet, error) { + var s struct { + QuotaSet *QuotaSet `json:"quota_set"` + } + err := r.ExtractInto(&s) + return s.QuotaSet, err +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a QuotaSet. +type GetResult struct { + quotaResult +} + +// UpdateResult is the response from a Update operation. Call its Extract method +// to interpret it as a QuotaSet. +type UpdateResult struct { + quotaResult +} + +type quotaUsageResult struct { + gophercloud.Result +} + +// GetUsageResult is the response from a Get operation. Call its Extract +// method to interpret it as a QuotaSet. +type GetUsageResult struct { + quotaUsageResult +} + +// Extract is a method that attempts to interpret any QuotaUsageSet resource +// response as a set of QuotaUsageSet structs. +func (r quotaUsageResult) Extract() (QuotaUsageSet, error) { + var s struct { + QuotaUsageSet QuotaUsageSet `json:"quota_set"` + } + err := r.ExtractInto(&s) + return s.QuotaUsageSet, err +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/doc.go new file mode 100644 index 000000000000..30d864eb95f8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/doc.go @@ -0,0 +1,2 @@ +// quotasets unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/fixtures.go new file mode 100644 index 000000000000..fe1a5d885dfa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/fixtures.go @@ -0,0 +1,162 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const FirstTenantID = "555544443333222211110000ffffeeee" + +var getExpectedJSONBody = ` +{ + "quota_set" : { + "volumes" : 8, + "snapshots" : 9, + "gigabytes" : 10, + "per_volume_gigabytes" : 11, + "backups" : 12, + "backup_gigabytes" : 13 + } +}` + +var getExpectedQuotaSet = quotasets.QuotaSet{ + Volumes: 8, + Snapshots: 9, + Gigabytes: 10, + PerVolumeGigabytes: 11, + Backups: 12, + BackupGigabytes: 13, +} + +var getUsageExpectedJSONBody = ` +{ + "quota_set" : { + "id": "555544443333222211110000ffffeeee", + "volumes" : { + "in_use": 15, + "limit": 16, + "reserved": 17 + }, + "snapshots" : { + "in_use": 18, + "limit": 19, + "reserved": 20 + }, + "gigabytes" : { + "in_use": 21, + "limit": 22, + "reserved": 23 + }, + "per_volume_gigabytes" : { + "in_use": 24, + "limit": 25, + 
"reserved": 26 + }, + "backups" : { + "in_use": 27, + "limit": 28, + "reserved": 29 + }, + "backup_gigabytes" : { + "in_use": 30, + "limit": 31, + "reserved": 32 + } + } + } +}` + +var getUsageExpectedQuotaSet = quotasets.QuotaUsageSet{ + ID: FirstTenantID, + Volumes: quotasets.QuotaUsage{InUse: 15, Limit: 16, Reserved: 17}, + Snapshots: quotasets.QuotaUsage{InUse: 18, Limit: 19, Reserved: 20}, + Gigabytes: quotasets.QuotaUsage{InUse: 21, Limit: 22, Reserved: 23}, + PerVolumeGigabytes: quotasets.QuotaUsage{InUse: 24, Limit: 25, Reserved: 26}, + Backups: quotasets.QuotaUsage{InUse: 27, Limit: 28, Reserved: 29}, + BackupGigabytes: quotasets.QuotaUsage{InUse: 30, Limit: 31, Reserved: 32}, +} + +var fullUpdateExpectedJSONBody = ` +{ + "quota_set": { + "volumes": 8, + "snapshots": 9, + "gigabytes": 10, + "per_volume_gigabytes": 11, + "backups": 12, + "backup_gigabytes": 13 + } +}` + +var fullUpdateOpts = quotasets.UpdateOpts{ + Volumes: gophercloud.IntToPointer(8), + Snapshots: gophercloud.IntToPointer(9), + Gigabytes: gophercloud.IntToPointer(10), + PerVolumeGigabytes: gophercloud.IntToPointer(11), + Backups: gophercloud.IntToPointer(12), + BackupGigabytes: gophercloud.IntToPointer(13), +} + +var fullUpdateExpectedQuotaSet = quotasets.QuotaSet{ + Volumes: 8, + Snapshots: 9, + Gigabytes: 10, + PerVolumeGigabytes: 11, + Backups: 12, + BackupGigabytes: 13, +} + +var partialUpdateExpectedJSONBody = ` +{ + "quota_set": { + "volumes": 200, + "snapshots": 0, + "gigabytes": 0, + "per_volume_gigabytes": 0, + "backups": 0, + "backup_gigabytes": 0 + } +}` + +var partialUpdateOpts = quotasets.UpdateOpts{ + Volumes: gophercloud.IntToPointer(200), + Snapshots: gophercloud.IntToPointer(0), + Gigabytes: gophercloud.IntToPointer(0), + PerVolumeGigabytes: gophercloud.IntToPointer(0), + Backups: gophercloud.IntToPointer(0), + BackupGigabytes: gophercloud.IntToPointer(0), +} + +var partiualUpdateExpectedQuotaSet = quotasets.QuotaSet{Volumes: 200} + +// HandleSuccessfulRequest configures the test server to respond to an HTTP request. +func HandleSuccessfulRequest(t *testing.T, httpMethod, uriPath, jsonOutput string, uriQueryParams map[string]string) { + + th.Mux.HandleFunc(uriPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, httpMethod) + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Content-Type", "application/json") + + if uriQueryParams != nil { + th.TestFormValues(t, r, uriQueryParams) + } + + fmt.Fprintf(w, jsonOutput) + }) +} + +// HandleDeleteSuccessfully tests quotaset deletion. 
+func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-quota-sets/"+FirstTenantID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusOK) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/requests_test.go new file mode 100644 index 000000000000..922b8d336e27 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/testing/requests_test.go @@ -0,0 +1,80 @@ +package testing + +import ( + "errors" + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + uriQueryParms := map[string]string{} + HandleSuccessfulRequest(t, "GET", "/os-quota-sets/"+FirstTenantID, getExpectedJSONBody, uriQueryParms) + actual, err := quotasets.Get(client.ServiceClient(), FirstTenantID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &getExpectedQuotaSet, actual) +} + +func TestGetUsage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + uriQueryParms := map[string]string{"usage": "true"} + HandleSuccessfulRequest(t, "GET", "/os-quota-sets/"+FirstTenantID, getUsageExpectedJSONBody, uriQueryParms) + actual, err := quotasets.GetUsage(client.ServiceClient(), FirstTenantID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, getUsageExpectedQuotaSet, actual) +} + +func TestFullUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + uriQueryParms := map[string]string{} + HandleSuccessfulRequest(t, "PUT", "/os-quota-sets/"+FirstTenantID, fullUpdateExpectedJSONBody, uriQueryParms) + actual, err := quotasets.Update(client.ServiceClient(), FirstTenantID, fullUpdateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &fullUpdateExpectedQuotaSet, actual) +} + +func TestPartialUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + uriQueryParms := map[string]string{} + HandleSuccessfulRequest(t, "PUT", "/os-quota-sets/"+FirstTenantID, partialUpdateExpectedJSONBody, uriQueryParms) + actual, err := quotasets.Update(client.ServiceClient(), FirstTenantID, partialUpdateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &partiualUpdateExpectedQuotaSet, actual) +} + +type ErrorUpdateOpts quotasets.UpdateOpts + +func (opts ErrorUpdateOpts) ToBlockStorageQuotaUpdateMap() (map[string]interface{}, error) { + return nil, errors.New("This is an error") +} + +func TestErrorInToBlockStorageQuotaUpdateMap(t *testing.T) { + opts := &ErrorUpdateOpts{} + th.SetupHTTP() + defer th.TeardownHTTP() + HandleSuccessfulRequest(t, "PUT", "/os-quota-sets/"+FirstTenantID, "", nil) + _, err := quotasets.Update(client.ServiceClient(), FirstTenantID, opts).Extract() + if err == nil { + t.Fatal("Error handling failed") + } +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := quotasets.Delete(client.ServiceClient(), FirstTenantID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/urls.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/urls.go new file mode 100644 index 000000000000..7d8e5ceb7a30 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/quotasets/urls.go @@ -0,0 +1,21 @@ +package quotasets + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-quota-sets" + +func getURL(c *gophercloud.ServiceClient, projectID string) string { + return c.ServiceURL(resourcePath, projectID) +} + +func getDefaultsURL(c *gophercloud.ServiceClient, projectID string) string { + return c.ServiceURL(resourcePath, projectID, "defaults") +} + +func updateURL(c *gophercloud.ServiceClient, projectID string) string { + return getURL(c, projectID) +} + +func deleteURL(c *gophercloud.ServiceClient, projectID string) string { + return getURL(c, projectID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/doc.go new file mode 100644 index 000000000000..b0a2c8ff30ef --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/doc.go @@ -0,0 +1,23 @@ +/* +Package schedulerstats returns information about block storage pool capacity +and utilisation. Example: + + listOpts := schedulerstats.ListOpts{ + Detail: true, + } + + allPages, err := schedulerstats.List(client, listOpts).AllPages() + if err != nil { + panic(err) + } + + allStats, err := schedulerstats.ExtractStoragePools(allPages) + if err != nil { + panic(err) + } + + for _, stat := range allStats { + fmt.Printf("%+v\n", stat) + } +*/ +package schedulerstats diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/requests.go new file mode 100644 index 000000000000..7b374dcd86a3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/requests.go @@ -0,0 +1,43 @@ +package schedulerstats + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToStoragePoolsListQuery() (string, error) +} + +// ListOpts controls the view of data returned (e.g globally or per project) +// via tenant_id and the verbosity via detail. +type ListOpts struct { + // ID of the tenant to look up storage pools for. + TenantID string `q:"tenant_id"` + + // Whether to list extended details. + Detail bool `q:"detail"` +} + +// ToStoragePoolsListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToStoragePoolsListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list storage pool information. 
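Editor's sketch: as an alternative to the AllPages pattern shown in the package documentation above, the Pager returned by List can be walked page by page, which is also how the package's own tests consume it (client assumed as before):

    err := schedulerstats.List(client, schedulerstats.ListOpts{Detail: true}).EachPage(
        func(page pagination.Page) (bool, error) {
            pools, err := schedulerstats.ExtractStoragePools(page)
            if err != nil {
                return false, err
            }
            for _, pool := range pools {
                fmt.Println(pool.Name)
            }
            return true, nil
        })
    if err != nil {
        panic(err)
    }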
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := storagePoolsListURL(client) + if opts != nil { + query, err := opts.ToStoragePoolsListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return StoragePoolPage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/results.go new file mode 100644 index 000000000000..9f5361195e04 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/results.go @@ -0,0 +1,98 @@ +package schedulerstats + +import ( + "encoding/json" + "math" + + "github.com/gophercloud/gophercloud/pagination" +) + +// Capabilities represents the information of an individual StoragePool. +type Capabilities struct { + // The following fields should be present in all storage drivers. + DriverVersion string `json:"driver_version"` + FreeCapacityGB float64 `json:"-"` + StorageProtocol string `json:"storage_protocol"` + TotalCapacityGB float64 `json:"-"` + VendorName string `json:"vendor_name"` + VolumeBackendName string `json:"volume_backend_name"` + + // The following fields are optional and may have empty values depending + // on the storage driver in use. + ReservedPercentage int64 `json:"reserved_percentage"` + LocationInfo string `json:"location_info"` + QoSSupport bool `json:"QoS_support"` + ProvisionedCapacityGB float64 `json:"provisioned_capacity_gb"` + MaxOverSubscriptionRatio string `json:"max_over_subscription_ratio"` + ThinProvisioningSupport bool `json:"thin_provisioning_support"` + ThickProvisioningSupport bool `json:"thick_provisioning_support"` + TotalVolumes int64 `json:"total_volumes"` + FilterFunction string `json:"filter_function"` + GoodnessFuction string `json:"goodness_function"` + Mutliattach bool `json:"multiattach"` + SparseCopyVolume bool `json:"sparse_copy_volume"` +} + +// StoragePool represents an individual StoragePool retrieved from the +// schedulerstats API. +type StoragePool struct { + Name string `json:"name"` + Capabilities Capabilities `json:"capabilities"` +} + +func (r *Capabilities) UnmarshalJSON(b []byte) error { + type tmp Capabilities + var s struct { + tmp + FreeCapacityGB interface{} `json:"free_capacity_gb"` + TotalCapacityGB interface{} `json:"total_capacity_gb"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Capabilities(s.tmp) + + // Generic function to parse a capacity value which may be a numeric + // value, "unknown", or "infinite" + parseCapacity := func(capacity interface{}) float64 { + if capacity != nil { + switch capacity.(type) { + case float64: + return capacity.(float64) + case string: + if capacity.(string) == "infinite" { + return math.Inf(1) + } + } + } + return 0.0 + } + + r.FreeCapacityGB = parseCapacity(s.FreeCapacityGB) + r.TotalCapacityGB = parseCapacity(s.TotalCapacityGB) + + return nil +} + +// StoragePoolPage is a single page of all List results. +type StoragePoolPage struct { + pagination.SinglePageBase +} + +// IsEmpty satisfies the IsEmpty method of the Page interface. It returns true +// if a List contains no results. 
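Editor's note: because free_capacity_gb and total_capacity_gb may arrive as a number, "unknown", or "infinite", the unmarshaller above maps "infinite" to +Inf and anything unparseable to 0, so callers doing capacity math may want to guard for those sentinels. A sketch, with pools assumed to come from ExtractStoragePools:

    for _, pool := range pools {
        caps := pool.Capabilities
        if math.IsInf(caps.TotalCapacityGB, 1) {
            fmt.Printf("%s: backend reports infinite capacity\n", pool.Name)
            continue
        }
        // "unknown" capacities come through as 0.
        fmt.Printf("%s: %.0f of %.0f GB free\n",
            pool.Name, caps.FreeCapacityGB, caps.TotalCapacityGB)
    }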
+func (page StoragePoolPage) IsEmpty() (bool, error) { + va, err := ExtractStoragePools(page) + return len(va) == 0, err +} + +// ExtractStoragePools takes a List result and extracts the collection of +// StoragePools returned by the API. +func ExtractStoragePools(p pagination.Page) ([]StoragePool, error) { + var s struct { + StoragePools []StoragePool `json:"pools"` + } + err := (p.(StoragePoolPage)).ExtractInto(&s) + return s.StoragePools, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/testing/fixtures.go new file mode 100644 index 000000000000..4031e29724ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/testing/fixtures.go @@ -0,0 +1,106 @@ +package testing + +import ( + "fmt" + "math" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats" + "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const StoragePoolsListBody = ` +{ + "pools": [ + { + "name": "rbd:cinder.volumes.ssd@cinder.volumes.ssd#cinder.volumes.ssd" + }, + { + "name": "rbd:cinder.volumes.hdd@cinder.volumes#cinder.volumes.hdd" + } + ] +} +` + +const StoragePoolsListBodyDetail = ` +{ + "pools": [ + { + "capabilities": { + "driver_version": "1.2.0", + "filter_function": null, + "free_capacity_gb": 64765, + "goodness_function": null, + "multiattach": false, + "reserved_percentage": 0, + "storage_protocol": "ceph", + "timestamp": "2016-11-24T10:33:51.248360", + "total_capacity_gb": 787947.93, + "vendor_name": "Open Source", + "volume_backend_name": "cinder.volumes.ssd" + }, + "name": "rbd:cinder.volumes.ssd@cinder.volumes.ssd#cinder.volumes.ssd" + }, + { + "capabilities": { + "driver_version": "1.2.0", + "filter_function": null, + "free_capacity_gb": "unknown", + "goodness_function": null, + "multiattach": false, + "reserved_percentage": 0, + "storage_protocol": "ceph", + "timestamp": "2016-11-24T10:33:43.138628", + "total_capacity_gb": "infinite", + "vendor_name": "Open Source", + "volume_backend_name": "cinder.volumes.hdd" + }, + "name": "rbd:cinder.volumes.hdd@cinder.volumes.hdd#cinder.volumes.hdd" + } + ] +} +` + +var ( + StoragePoolFake1 = schedulerstats.StoragePool{ + Name: "rbd:cinder.volumes.ssd@cinder.volumes.ssd#cinder.volumes.ssd", + Capabilities: schedulerstats.Capabilities{ + DriverVersion: "1.2.0", + FreeCapacityGB: 64765, + StorageProtocol: "ceph", + TotalCapacityGB: 787947.93, + VendorName: "Open Source", + VolumeBackendName: "cinder.volumes.ssd", + }, + } + + StoragePoolFake2 = schedulerstats.StoragePool{ + Name: "rbd:cinder.volumes.hdd@cinder.volumes.hdd#cinder.volumes.hdd", + Capabilities: schedulerstats.Capabilities{ + DriverVersion: "1.2.0", + FreeCapacityGB: 0.0, + StorageProtocol: "ceph", + TotalCapacityGB: math.Inf(1), + VendorName: "Open Source", + VolumeBackendName: "cinder.volumes.hdd", + }, + } +) + +func HandleStoragePoolsListSuccessfully(t *testing.T) { + testhelper.Mux.HandleFunc("/scheduler-stats/get_pools", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + + r.ParseForm() + if r.FormValue("detail") == "true" { + fmt.Fprintf(w, StoragePoolsListBodyDetail) + } else { + fmt.Fprintf(w, 
StoragePoolsListBody) + } + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/testing/requests_test.go new file mode 100644 index 000000000000..8a4ef5180ddd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/testing/requests_test.go @@ -0,0 +1,38 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats" + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListStoragePoolsDetail(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleStoragePoolsListSuccessfully(t) + + pages := 0 + err := schedulerstats.List(client.ServiceClient(), schedulerstats.ListOpts{Detail: true}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := schedulerstats.ExtractStoragePools(page) + testhelper.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 backends, got %d", len(actual)) + } + testhelper.CheckDeepEquals(t, StoragePoolFake1, actual[0]) + testhelper.CheckDeepEquals(t, StoragePoolFake2, actual[1]) + + return true, nil + }) + + testhelper.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/urls.go new file mode 100644 index 000000000000..c0ddb3695af7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/schedulerstats/urls.go @@ -0,0 +1,7 @@ +package schedulerstats + +import "github.com/gophercloud/gophercloud" + +func storagePoolsListURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("scheduler-stats", "get_pools") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/doc.go new file mode 100644 index 000000000000..b3fba4cd625d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/doc.go @@ -0,0 +1,22 @@ +/* +Package services returns information about the blockstorage services in the +OpenStack cloud. 
+ +Example of Retrieving list of all services + + allPages, err := services.List(blockstorageClient, services.ListOpts{}).AllPages() + if err != nil { + panic(err) + } + + allServices, err := services.ExtractServices(allPages) + if err != nil { + panic(err) + } + + for _, service := range allServices { + fmt.Printf("%+v\n", service) + } +*/ + +package services diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/requests.go new file mode 100644 index 000000000000..0edcfc9d7eb7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/requests.go @@ -0,0 +1,42 @@ +package services + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToServiceListQuery() (string, error) +} + +// ListOpts holds options for listing Services. +type ListOpts struct { + // Filter the service list result by binary name of the service. + Binary string `q:"binary"` + + // Filter the service list result by host name of the service. + Host string `q:"host"` +} + +// ToServiceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToServiceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list services. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToServiceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServicePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/results.go new file mode 100644 index 000000000000..49ad48ef61b4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/results.go @@ -0,0 +1,84 @@ +package services + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Service represents a Blockstorage service in the OpenStack cloud. +type Service struct { + // The binary name of the service. + Binary string `json:"binary"` + + // The reason for disabling a service. + DisabledReason string `json:"disabled_reason"` + + // The name of the host. + Host string `json:"host"` + + // The state of the service. One of up or down. + State string `json:"state"` + + // The status of the service. One of available or unavailable. + Status string `json:"status"` + + // The date and time stamp when the extension was last updated. + UpdatedAt time.Time `json:"-"` + + // The availability zone name. + Zone string `json:"zone"` + + // The following fields are optional + + // The host is frozen or not. Only in cinder-volume service. + Frozen bool `json:"frozen"` + + // The cluster name. Only in cinder-volume service. + Cluster string `json:"cluster"` + + // The volume service replication status. Only in cinder-volume service. 
+ ReplicationStatus string `json:"replication_status"` + + // The ID of active storage backend. Only in cinder-volume service. + ActiveBackendID string `json:"active_backend_id"` +} + +// UnmarshalJSON to override default +func (r *Service) UnmarshalJSON(b []byte) error { + type tmp Service + var s struct { + tmp + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Service(s.tmp) + + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +// ServicePage represents a single page of all Services from a List request. +type ServicePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Services contains any results. +func (page ServicePage) IsEmpty() (bool, error) { + services, err := ExtractServices(page) + return len(services) == 0, err +} + +func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Service []Service `json:"services"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Service, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/testing/fixtures.go new file mode 100644 index 000000000000..9d14723c12d5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/testing/fixtures.go @@ -0,0 +1,97 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ServiceListBody is sample response to the List call +const ServiceListBody = ` +{ + "services": [{ + "status": "enabled", + "binary": "cinder-scheduler", + "zone": "nova", + "state": "up", + "updated_at": "2017-06-29T05:50:35.000000", + "host": "devstack", + "disabled_reason": null + }, + { + "status": "enabled", + "binary": "cinder-backup", + "zone": "nova", + "state": "up", + "updated_at": "2017-06-29T05:50:42.000000", + "host": "devstack", + "disabled_reason": null + }, + { + "status": "enabled", + "binary": "cinder-volume", + "zone": "nova", + "frozen": false, + "state": "up", + "updated_at": "2017-06-29T05:50:39.000000", + "cluster": null, + "host": "devstack@lvmdriver-1", + "replication_status": "disabled", + "active_backend_id": null, + "disabled_reason": null + }] +} +` + +// First service from the ServiceListBody +var FirstFakeService = services.Service{ + Binary: "cinder-scheduler", + DisabledReason: "", + Host: "devstack", + State: "up", + Status: "enabled", + UpdatedAt: time.Date(2017, 6, 29, 5, 50, 35, 0, time.UTC), + Zone: "nova", +} + +// Second service from the ServiceListBody +var SecondFakeService = services.Service{ + Binary: "cinder-backup", + DisabledReason: "", + Host: "devstack", + State: "up", + Status: "enabled", + UpdatedAt: time.Date(2017, 6, 29, 5, 50, 42, 0, time.UTC), + Zone: "nova", +} + +// Third service from the ServiceListBody +var ThirdFakeService = services.Service{ + ActiveBackendID: "", + Binary: "cinder-volume", + Cluster: "", + DisabledReason: "", + Frozen: false, + Host: "devstack@lvmdriver-1", + ReplicationStatus: "disabled", + State: "up", + Status: "enabled", + UpdatedAt: time.Date(2017, 6, 29, 5, 50, 39, 0, time.UTC), + Zone: "nova", +} + +// HandleListSuccessfully configures the test server to respond to a List 
request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ServiceListBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/testing/requests_test.go new file mode 100644 index 000000000000..4178c2369965 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/testing/requests_test.go @@ -0,0 +1,41 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services" + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListServices(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleListSuccessfully(t) + + pages := 0 + err := services.List(client.ServiceClient(), services.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := services.ExtractServices(page) + if err != nil { + return false, err + } + + if len(actual) != 3 { + t.Fatalf("Expected 3 services, got %d", len(actual)) + } + testhelper.CheckDeepEquals(t, FirstFakeService, actual[0]) + testhelper.CheckDeepEquals(t, SecondFakeService, actual[1]) + testhelper.CheckDeepEquals(t, ThirdFakeService, actual[2]) + + return true, nil + }) + + testhelper.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/urls.go new file mode 100644 index 000000000000..61d794007e91 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/services/urls.go @@ -0,0 +1,7 @@ +package services + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-services") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go new file mode 100644 index 000000000000..a78d3d0482c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/doc.go @@ -0,0 +1,86 @@ +/* +Package volumeactions provides information and interaction with volumes in the +OpenStack Block Storage service. A volume is a detachable block storage +device, akin to a USB hard drive. 
+ +Example of Attaching a Volume to an Instance + + attachOpts := volumeactions.AttachOpts{ + MountPoint: "/mnt", + Mode: "rw", + InstanceUUID: server.ID, + } + + err := volumeactions.Attach(client, volume.ID, attachOpts).ExtractErr() + if err != nil { + panic(err) + } + + detachOpts := volumeactions.DetachOpts{ + AttachmentID: volume.Attachments[0].AttachmentID, + } + + err = volumeactions.Detach(client, volume.ID, detachOpts).ExtractErr() + if err != nil { + panic(err) + } + + +Example of Creating an Image from a Volume + + uploadImageOpts := volumeactions.UploadImageOpts{ + ImageName: "my_vol", + Force: true, + } + + volumeImage, err := volumeactions.UploadImage(client, volume.ID, uploadImageOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", volumeImage) + +Example of Extending a Volume's Size + + extendOpts := volumeactions.ExtendSizeOpts{ + NewSize: 100, + } + + err := volumeactions.ExtendSize(client, volume.ID, extendOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example of Initializing a Volume Connection + + connectOpts := &volumeactions.InitializeConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + + connectionInfo, err := volumeactions.InitializeConnection(client, volume.ID, connectOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", connectionInfo["data"]) + + terminateOpts := &volumeactions.InitializeConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + + err = volumeactions.TerminateConnection(client, volume.ID, terminateOpts).ExtractErr() + if err != nil { + panic(err) + } +*/ +package volumeactions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go new file mode 100644 index 000000000000..d18bff555b54 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/requests.go @@ -0,0 +1,269 @@ +package volumeactions + +import ( + "github.com/gophercloud/gophercloud" +) + +// AttachOptsBuilder allows extensions to add additional parameters to the +// Attach request. +type AttachOptsBuilder interface { + ToVolumeAttachMap() (map[string]interface{}, error) +} + +// AttachMode describes the attachment mode for volumes. +type AttachMode string + +// These constants determine how a volume is attached. +const ( + ReadOnly AttachMode = "ro" + ReadWrite AttachMode = "rw" +) + +// AttachOpts contains options for attaching a Volume. +type AttachOpts struct { + // The mountpoint of this volume. + MountPoint string `json:"mountpoint,omitempty"` + + // The nova instance ID, can't set simultaneously with HostName. + InstanceUUID string `json:"instance_uuid,omitempty"` + + // The hostname of baremetal host, can't set simultaneously with InstanceUUID. + HostName string `json:"host_name,omitempty"` + + // Mount mode of this volume. + Mode AttachMode `json:"mode,omitempty"` +} + +// ToVolumeAttachMap assembles a request body based on the contents of a +// AttachOpts. 
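Editor's sketch of the body that ToVolumeAttachMap builds: it nests the options under an os-attach key, which is the shape the test fixtures later in this change expect. serverID is assumed to hold a Nova instance UUID:

    b, err := volumeactions.AttachOpts{
        MountPoint:   "/mnt",
        Mode:         volumeactions.ReadWrite,
        InstanceUUID: serverID,
    }.ToVolumeAttachMap()
    if err != nil {
        panic(err)
    }

    // b now looks like {"os-attach": {"mountpoint": "/mnt", "mode": "rw", "instance_uuid": <serverID>}}
    fmt.Println(b)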
+func (opts AttachOpts) ToVolumeAttachMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-attach") +} + +// Attach will attach a volume based on the values in AttachOpts. +func Attach(client *gophercloud.ServiceClient, id string, opts AttachOptsBuilder) (r AttachResult) { + b, err := opts.ToVolumeAttachMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// BeginDetach will mark the volume as detaching. +func BeginDetaching(client *gophercloud.ServiceClient, id string) (r BeginDetachingResult) { + b := map[string]interface{}{"os-begin_detaching": make(map[string]interface{})} + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// DetachOptsBuilder allows extensions to add additional parameters to the +// Detach request. +type DetachOptsBuilder interface { + ToVolumeDetachMap() (map[string]interface{}, error) +} + +// DetachOpts contains options for detaching a Volume. +type DetachOpts struct { + // AttachmentID is the ID of the attachment between a volume and instance. + AttachmentID string `json:"attachment_id,omitempty"` +} + +// ToVolumeDetachMap assembles a request body based on the contents of a +// DetachOpts. +func (opts DetachOpts) ToVolumeDetachMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-detach") +} + +// Detach will detach a volume based on volume ID. +func Detach(client *gophercloud.ServiceClient, id string, opts DetachOptsBuilder) (r DetachResult) { + b, err := opts.ToVolumeDetachMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Reserve will reserve a volume based on volume ID. +func Reserve(client *gophercloud.ServiceClient, id string) (r ReserveResult) { + b := map[string]interface{}{"os-reserve": make(map[string]interface{})} + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// Unreserve will unreserve a volume based on volume ID. +func Unreserve(client *gophercloud.ServiceClient, id string) (r UnreserveResult) { + b := map[string]interface{}{"os-unreserve": make(map[string]interface{})} + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// InitializeConnectionOptsBuilder allows extensions to add additional parameters to the +// InitializeConnection request. +type InitializeConnectionOptsBuilder interface { + ToVolumeInitializeConnectionMap() (map[string]interface{}, error) +} + +// InitializeConnectionOpts hosts options for InitializeConnection. +// The fields are specific to the storage driver in use and the destination +// attachment. +type InitializeConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeInitializeConnectionMap assembles a request body based on the contents of a +// InitializeConnectionOpts. 
+func (opts InitializeConnectionOpts) ToVolumeInitializeConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-initialize_connection": b}, err +} + +// InitializeConnection initializes an iSCSI connection by volume ID. +func InitializeConnection(client *gophercloud.ServiceClient, id string, opts InitializeConnectionOptsBuilder) (r InitializeConnectionResult) { + b, err := opts.ToVolumeInitializeConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// TerminateConnectionOptsBuilder allows extensions to add additional parameters to the +// TerminateConnection request. +type TerminateConnectionOptsBuilder interface { + ToVolumeTerminateConnectionMap() (map[string]interface{}, error) +} + +// TerminateConnectionOpts hosts options for TerminateConnection. +type TerminateConnectionOpts struct { + IP string `json:"ip,omitempty"` + Host string `json:"host,omitempty"` + Initiator string `json:"initiator,omitempty"` + Wwpns []string `json:"wwpns,omitempty"` + Wwnns string `json:"wwnns,omitempty"` + Multipath *bool `json:"multipath,omitempty"` + Platform string `json:"platform,omitempty"` + OSType string `json:"os_type,omitempty"` +} + +// ToVolumeTerminateConnectionMap assembles a request body based on the contents of a +// TerminateConnectionOpts. +func (opts TerminateConnectionOpts) ToVolumeTerminateConnectionMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "connector") + return map[string]interface{}{"os-terminate_connection": b}, err +} + +// TerminateConnection terminates an iSCSI connection by volume ID. +func TerminateConnection(client *gophercloud.ServiceClient, id string, opts TerminateConnectionOptsBuilder) (r TerminateConnectionResult) { + b, err := opts.ToVolumeTerminateConnectionMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// ExtendSizeOptsBuilder allows extensions to add additional parameters to the +// ExtendSize request. +type ExtendSizeOptsBuilder interface { + ToVolumeExtendSizeMap() (map[string]interface{}, error) +} + +// ExtendSizeOpts contains options for extending the size of an existing Volume. +// This object is passed to the volumes.ExtendSize function. +type ExtendSizeOpts struct { + // NewSize is the new size of the volume, in GB. + NewSize int `json:"new_size" required:"true"` +} + +// ToVolumeExtendSizeMap assembles a request body based on the contents of an +// ExtendSizeOpts. +func (opts ExtendSizeOpts) ToVolumeExtendSizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-extend") +} + +// ExtendSize will extend the size of the volume based on the provided information. +// This operation does not return a response body. +func ExtendSize(client *gophercloud.ServiceClient, id string, opts ExtendSizeOptsBuilder) (r ExtendSizeResult) { + b, err := opts.ToVolumeExtendSizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// UploadImageOptsBuilder allows extensions to add additional parameters to the +// UploadImage request. 
+type UploadImageOptsBuilder interface { + ToVolumeUploadImageMap() (map[string]interface{}, error) +} + +// UploadImageOpts contains options for uploading a Volume to image storage. +type UploadImageOpts struct { + // Container format, may be bare, ofv, ova, etc. + ContainerFormat string `json:"container_format,omitempty"` + + // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. + DiskFormat string `json:"disk_format,omitempty"` + + // The name of image that will be stored in glance. + ImageName string `json:"image_name,omitempty"` + + // Force image creation, usable if volume attached to instance. + Force bool `json:"force,omitempty"` +} + +// ToVolumeUploadImageMap assembles a request body based on the contents of a +// UploadImageOpts. +func (opts UploadImageOpts) ToVolumeUploadImageMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-volume_upload_image") +} + +// UploadImage will upload an image based on the values in UploadImageOptsBuilder. +func UploadImage(client *gophercloud.ServiceClient, id string, opts UploadImageOptsBuilder) (r UploadImageResult) { + b, err := opts.ToVolumeUploadImageMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// ForceDelete will delete the volume regardless of state. +func ForceDelete(client *gophercloud.ServiceClient, id string) (r ForceDeleteResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-force_delete": ""}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go new file mode 100644 index 000000000000..5cadd360f205 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/results.go @@ -0,0 +1,191 @@ +package volumeactions + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" +) + +// AttachResult contains the response body and error from an Attach request. +type AttachResult struct { + gophercloud.ErrResult +} + +// BeginDetachingResult contains the response body and error from a BeginDetach +// request. +type BeginDetachingResult struct { + gophercloud.ErrResult +} + +// DetachResult contains the response body and error from a Detach request. +type DetachResult struct { + gophercloud.ErrResult +} + +// UploadImageResult contains the response body and error from an UploadImage +// request. +type UploadImageResult struct { + gophercloud.Result +} + +// ReserveResult contains the response body and error from a Reserve request. +type ReserveResult struct { + gophercloud.ErrResult +} + +// UnreserveResult contains the response body and error from an Unreserve +// request. +type UnreserveResult struct { + gophercloud.ErrResult +} + +// TerminateConnectionResult contains the response body and error from a +// TerminateConnection request. +type TerminateConnectionResult struct { + gophercloud.ErrResult +} + +// InitializeConnectionResult contains the response body and error from an +// InitializeConnection request. +type InitializeConnectionResult struct { + gophercloud.Result +} + +// ExtendSizeResult contains the response body and error from an ExtendSize request. 
+type ExtendSizeResult struct { + gophercloud.ErrResult +} + +// Extract will get the connection information out of the +// InitializeConnectionResult object. +// +// This will be a generic map[string]interface{} and the results will be +// dependent on the type of connection made. +func (r InitializeConnectionResult) Extract() (map[string]interface{}, error) { + var s struct { + ConnectionInfo map[string]interface{} `json:"connection_info"` + } + err := r.ExtractInto(&s) + return s.ConnectionInfo, err +} + +// ImageVolumeType contains volume type information obtained from UploadImage +// action. +type ImageVolumeType struct { + // The ID of a volume type. + ID string `json:"id"` + + // Human-readable display name for the volume type. + Name string `json:"name"` + + // Human-readable description for the volume type. + Description string `json:"display_description"` + + // Flag for public access. + IsPublic bool `json:"is_public"` + + // Extra specifications for volume type. + ExtraSpecs map[string]interface{} `json:"extra_specs"` + + // ID of quality of service specs. + QosSpecsID string `json:"qos_specs_id"` + + // Flag for deletion status of volume type. + Deleted bool `json:"deleted"` + + // The date when volume type was deleted. + DeletedAt time.Time `json:"-"` + + // The date when volume type was created. + CreatedAt time.Time `json:"-"` + + // The date when this volume was last updated. + UpdatedAt time.Time `json:"-"` +} + +func (r *ImageVolumeType) UnmarshalJSON(b []byte) error { + type tmp ImageVolumeType + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ImageVolumeType(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.DeletedAt = time.Time(s.DeletedAt) + + return err +} + +// VolumeImage contains information about volume uploaded to an image service. +type VolumeImage struct { + // The ID of a volume an image is created from. + VolumeID string `json:"id"` + + // Container format, may be bare, ofv, ova, etc. + ContainerFormat string `json:"container_format"` + + // Disk format, may be raw, qcow2, vhd, vdi, vmdk, etc. + DiskFormat string `json:"disk_format"` + + // Human-readable description for the volume. + Description string `json:"display_description"` + + // The ID of the created image. + ImageID string `json:"image_id"` + + // Human-readable display name for the image. + ImageName string `json:"image_name"` + + // Size of the volume in GB. + Size int `json:"size"` + + // Current status of the volume. + Status string `json:"status"` + + // The date when this volume was last updated. + UpdatedAt time.Time `json:"-"` + + // Volume type object of used volume. + VolumeType ImageVolumeType `json:"volume_type"` +} + +func (r *VolumeImage) UnmarshalJSON(b []byte) error { + type tmp VolumeImage + var s struct { + tmp + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = VolumeImage(s.tmp) + + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// Extract will get an object with info about the uploaded image out of the +// UploadImageResult object. 
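Editor's sketch: since the connection information above is returned as a generic map, callers have to dig out the driver-specific fields themselves. A hedged example based on the iSCSI-style payload used by this package's test fixtures (client, volumeID and connectOpts assumed):

    info, err := volumeactions.InitializeConnection(client, volumeID, connectOpts).Extract()
    if err != nil {
        panic(err)
    }

    if data, ok := info["data"].(map[string]interface{}); ok {
        fmt.Println("target:", data["target_portal"], data["target_iqn"])
    }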
+func (r UploadImageResult) Extract() (VolumeImage, error) { + var s struct { + VolumeImage VolumeImage `json:"os-volume_upload_image"` + } + err := r.ExtractInto(&s) + return s.VolumeImage, err +} + +// ForceDeleteResult contains the response body and error from a ForceDelete request. +type ForceDeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/doc.go new file mode 100644 index 000000000000..336406df1d35 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/doc.go @@ -0,0 +1,2 @@ +// volumeactions unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/fixtures.go new file mode 100644 index 000000000000..4a0c21701ead --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/fixtures.go @@ -0,0 +1,287 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockAttachResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-attach": + { + "mountpoint": "/mnt", + "mode": "rw", + "instance_uuid": "50902f4f-a974-46a0-85e9-7efc5e22dfdd" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockBeginDetachingResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-begin_detaching": {} +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockDetachResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-detach": {} +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockUploadImageResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + 
th.TestJSONRequest(t, r, ` +{ + "os-volume_upload_image": { + "container_format": "bare", + "force": true, + "image_name": "test", + "disk_format": "raw" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "os-volume_upload_image": { + "container_format": "bare", + "display_description": null, + "id": "cd281d77-8217-4830-be95-9528227c105c", + "image_id": "ecb92d98-de08-45db-8235-bbafe317269c", + "image_name": "test", + "disk_format": "raw", + "size": 5, + "status": "uploading", + "updated_at": "2017-07-17T09:29:22.000000", + "volume_type": { + "created_at": "2016-05-04T08:54:14.000000", + "deleted": false, + "deleted_at": null, + "description": null, + "extra_specs": { + "volume_backend_name": "basic.ru-2a" + }, + "id": "b7133444-62f6-4433-8da3-70ac332229b7", + "is_public": true, + "name": "basic.ru-2a", + "updated_at": "2016-05-04T09:15:33.000000" + } + } +} + `) + }) +} + +func MockReserveResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-reserve": {} +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockUnreserveResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-unreserve": {} +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockInitializeConnectionResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-initialize_connection": + { + "connector": + { + "ip":"127.0.0.1", + "host":"stack", + "initiator":"iqn.1994-05.com.redhat:17cf566367d2", + "multipath": false, + "platform": "x86_64", + "os_type": "linux2" + } + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{ +"connection_info": { + "data": { + "target_portals": [ + "172.31.17.48:3260" + ], + "auth_method": "CHAP", + "auth_username": "5MLtcsTEmNN5jFVcT6ui", + "access_mode": "rw", + "target_lun": 0, + "volume_id": "cd281d77-8217-4830-be95-9528227c105c", + "target_luns": [ + 0 + ], + "target_iqns": [ + "iqn.2010-10.org.openstack:volume-cd281d77-8217-4830-be95-9528227c105c" + ], + "auth_password": "x854ZY5Re3aCkdNL", + "target_discovered": false, + "encrypted": false, + "qos_specs": null, + "target_iqn": "iqn.2010-10.org.openstack:volume-cd281d77-8217-4830-be95-9528227c105c", + "target_portal": "172.31.17.48:3260" + }, + "driver_volume_type": "iscsi" + } + }`) + }) +} + +func MockTerminateConnectionResponse(t *testing.T) { + 
th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-terminate_connection": + { + "connector": + { + "ip":"127.0.0.1", + "host":"stack", + "initiator":"iqn.1994-05.com.redhat:17cf566367d2", + "multipath": true, + "platform": "x86_64", + "os_type": "linux2" + } + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockExtendSizeResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/cd281d77-8217-4830-be95-9528227c105c/action", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "os-extend": + { + "new_size": 3 + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, `{}`) + }) +} + +func MockForceDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestBody(t, r, `{"os-force_delete":""}`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/requests_test.go new file mode 100644 index 000000000000..38fae4788958 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/testing/requests_test.go @@ -0,0 +1,166 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestAttach(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockAttachResponse(t) + + options := &volumeactions.AttachOpts{ + MountPoint: "/mnt", + Mode: "rw", + InstanceUUID: "50902f4f-a974-46a0-85e9-7efc5e22dfdd", + } + err := volumeactions.Attach(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", options).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestBeginDetaching(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockBeginDetachingResponse(t) + + err := volumeactions.BeginDetaching(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDetach(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDetachResponse(t) + + err := volumeactions.Detach(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", &volumeactions.DetachOpts{}).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUploadImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + MockUploadImageResponse(t) + options := &volumeactions.UploadImageOpts{ + ContainerFormat: "bare", + DiskFormat: "raw", + ImageName: "test", + Force: true, + } + + actual, err := 
volumeactions.UploadImage(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", options).Extract() + th.AssertNoErr(t, err) + + expected := volumeactions.VolumeImage{ + VolumeID: "cd281d77-8217-4830-be95-9528227c105c", + ContainerFormat: "bare", + DiskFormat: "raw", + Description: "", + ImageID: "ecb92d98-de08-45db-8235-bbafe317269c", + ImageName: "test", + Size: 5, + Status: "uploading", + UpdatedAt: time.Date(2017, 7, 17, 9, 29, 22, 0, time.UTC), + VolumeType: volumeactions.ImageVolumeType{ + ID: "b7133444-62f6-4433-8da3-70ac332229b7", + Name: "basic.ru-2a", + Description: "", + IsPublic: true, + ExtraSpecs: map[string]interface{}{"volume_backend_name": "basic.ru-2a"}, + QosSpecsID: "", + Deleted: false, + DeletedAt: time.Time{}, + CreatedAt: time.Date(2016, 5, 4, 8, 54, 14, 0, time.UTC), + UpdatedAt: time.Date(2016, 5, 4, 9, 15, 33, 0, time.UTC), + }, + } + th.AssertDeepEquals(t, expected, actual) +} + +func TestReserve(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockReserveResponse(t) + + err := volumeactions.Reserve(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUnreserve(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUnreserveResponse(t) + + err := volumeactions.Unreserve(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestInitializeConnection(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockInitializeConnectionResponse(t) + + options := &volumeactions.InitializeConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Disabled, + Platform: "x86_64", + OSType: "linux2", + } + _, err := volumeactions.InitializeConnection(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", options).Extract() + th.AssertNoErr(t, err) +} + +func TestTerminateConnection(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockTerminateConnectionResponse(t) + + options := &volumeactions.TerminateConnectionOpts{ + IP: "127.0.0.1", + Host: "stack", + Initiator: "iqn.1994-05.com.redhat:17cf566367d2", + Multipath: gophercloud.Enabled, + Platform: "x86_64", + OSType: "linux2", + } + err := volumeactions.TerminateConnection(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", options).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestExtendSize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockExtendSizeResponse(t) + + options := &volumeactions.ExtendSizeOpts{ + NewSize: 3, + } + + err := volumeactions.ExtendSize(client.ServiceClient(), "cd281d77-8217-4830-be95-9528227c105c", options).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestForceDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockForceDeleteResponse(t) + + res := volumeactions.ForceDelete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go new file mode 100644 index 000000000000..20486ed71944 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumeactions/urls.go @@ -0,0 +1,7 @@ +package volumeactions + +import "github.com/gophercloud/gophercloud" + +func actionURL(c *gophercloud.ServiceClient, id string) string 
{
+	return c.ServiceURL("volumes", id, "action")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants/doc.go
new file mode 100644
index 000000000000..2f501649fee4
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants/doc.go
@@ -0,0 +1,26 @@
+/*
+Package volumetenants provides the ability to extend a volume result with
+tenant/project information. Example:
+
+	type VolumeWithTenant struct {
+		volumes.Volume
+		volumetenants.VolumeTenantExt
+	}
+
+	var allVolumes []VolumeWithTenant
+
+	allPages, err := volumes.List(client, nil).AllPages()
+	if err != nil {
+		panic(fmt.Sprintf("Unable to retrieve volumes: %s", err))
+	}
+
+	err = volumes.ExtractVolumesInto(allPages, &allVolumes)
+	if err != nil {
+		panic(fmt.Sprintf("Unable to extract volumes: %s", err))
+	}
+
+	for _, volume := range allVolumes {
+		fmt.Println(volume.TenantID)
+	}
+*/
+package volumetenants
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants/results.go
new file mode 100644
index 000000000000..821e523b7f01
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants/results.go
@@ -0,0 +1,7 @@
+package volumetenants
+
+// VolumeTenantExt is an extension to the base Volume object
+type VolumeTenantExt struct {
+	// TenantID is the id of the project that owns the volume.
+	TenantID string `json:"os-vol-tenant-attr:tenant_id"`
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/doc.go
new file mode 100644
index 000000000000..25a7f845825a
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/doc.go
@@ -0,0 +1,17 @@
+/*
+Package noauth creates a "noauth" *gophercloud.ServiceClient for use in Cinder
+environments configured with the noauth authentication middleware.
+
+Example of Creating a noauth Service Client
+
+	provider, err := noauth.NewClient(gophercloud.AuthOptions{
+		Username:   os.Getenv("OS_USERNAME"),
+		TenantName: os.Getenv("OS_TENANT_NAME"),
+	})
+	client, err := noauth.NewBlockStorageNoAuth(provider, noauth.EndpointOpts{
+		CinderEndpoint: os.Getenv("CINDER_ENDPOINT"),
+	})
+
+	An example of a CinderEndpoint would be: http://example.com:8776/v2.
+*/
+package noauth
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/requests.go
new file mode 100644
index 000000000000..21cc8f09df8e
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/requests.go
@@ -0,0 +1,55 @@
+package noauth
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+)
+
+// EndpointOpts specifies a "noauth" Cinder Endpoint.
+type EndpointOpts struct {
+	// CinderEndpoint [required] is currently only used with "noauth" Cinder.
+	// A cinder endpoint with "auth_strategy=noauth" is necessary, for example:
+	// http://example.com:8776/v2.
+	CinderEndpoint string
+}
+
+// NewClient prepares an unauthenticated ProviderClient instance.
+func NewClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { + if options.Username == "" { + options.Username = "admin" + } + if options.TenantName == "" { + options.TenantName = "admin" + } + + client := &gophercloud.ProviderClient{ + TokenID: fmt.Sprintf("%s:%s", options.Username, options.TenantName), + } + + return client, nil +} + +func initClientOpts(client *gophercloud.ProviderClient, eo EndpointOpts) (*gophercloud.ServiceClient, error) { + sc := new(gophercloud.ServiceClient) + if eo.CinderEndpoint == "" { + return nil, fmt.Errorf("CinderEndpoint is required") + } + + token := strings.Split(client.TokenID, ":") + if len(token) != 2 { + return nil, fmt.Errorf("Malformed noauth token") + } + + endpoint := fmt.Sprintf("%s%s", gophercloud.NormalizeURL(eo.CinderEndpoint), token[1]) + sc.Endpoint = gophercloud.NormalizeURL(endpoint) + sc.ProviderClient = client + return sc, nil +} + +// NewBlockStorageNoAuth creates a ServiceClient that may be used to access a +// "noauth" block storage service (V2 or V3 Cinder API). +func NewBlockStorageNoAuth(client *gophercloud.ProviderClient, eo EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/doc.go new file mode 100644 index 000000000000..425ab60553d1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/doc.go @@ -0,0 +1,2 @@ +// noauth unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/fixtures.go new file mode 100644 index 000000000000..f78bda3c5f3b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/fixtures.go @@ -0,0 +1,19 @@ +package testing + +// NoAuthResult is the expected result of the noauth Service Client +type NoAuthResult struct { + TokenID string + Endpoint string +} + +var naTestResult = NoAuthResult{ + TokenID: "user:test", + Endpoint: "http://cinder:8776/v2/test/", +} + +var naResult = NoAuthResult{ + TokenID: "admin:admin", + Endpoint: "http://cinder:8776/v2/admin/", +} + +var errorResult = "CinderEndpoint is required" diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/requests_test.go new file mode 100644 index 000000000000..14080259aaba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/noauth/testing/requests_test.go @@ -0,0 +1,38 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/blockstorage/noauth" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestNoAuth(t *testing.T) { + ao := gophercloud.AuthOptions{ + Username: "user", + TenantName: "test", + } + provider, err := noauth.NewClient(ao) + th.AssertNoErr(t, err) + noauthClient, err := noauth.NewBlockStorageNoAuth(provider, noauth.EndpointOpts{ + CinderEndpoint: "http://cinder:8776/v2", + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, naTestResult.Endpoint, noauthClient.Endpoint) + th.AssertEquals(t, naTestResult.TokenID, noauthClient.TokenID) + + ao2 := gophercloud.AuthOptions{} + provider2, err := 
noauth.NewClient(ao2) + th.AssertNoErr(t, err) + noauthClient2, err := noauth.NewBlockStorageNoAuth(provider2, noauth.EndpointOpts{ + CinderEndpoint: "http://cinder:8776/v2/", + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, naResult.Endpoint, noauthClient2.Endpoint) + th.AssertEquals(t, naResult.TokenID, noauthClient2.TokenID) + + errTest, err := noauth.NewBlockStorageNoAuth(provider2, noauth.EndpointOpts{}) + _ = errTest + th.AssertEquals(t, errorResult, err.Error()) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/doc.go new file mode 100644 index 000000000000..e3af39f513a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/doc.go @@ -0,0 +1,3 @@ +// Package apiversions provides information and interaction with the different +// API versions for the OpenStack Block Storage service, code-named Cinder. +package apiversions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/requests.go new file mode 100644 index 000000000000..725c13a76150 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/requests.go @@ -0,0 +1,20 @@ +package apiversions + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List lists all the Cinder API versions available to end-users. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page { + return APIVersionPage{pagination.SinglePageBase(r)} + }) +} + +// Get will retrieve the volume type with the provided ID. To extract the volume +// type from the result, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, v string) (r GetResult) { + _, r.Err = client.Get(getURL(client, v), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/results.go new file mode 100644 index 000000000000..f510c6d103ab --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/results.go @@ -0,0 +1,49 @@ +package apiversions + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// APIVersion represents an API version for Cinder. +type APIVersion struct { + ID string `json:"id"` // unique identifier + Status string `json:"status"` // current status + Updated string `json:"updated"` // date last updated +} + +// APIVersionPage is the page returned by a pager when traversing over a +// collection of API versions. +type APIVersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an APIVersionPage struct is empty. +func (r APIVersionPage) IsEmpty() (bool, error) { + is, err := ExtractAPIVersions(r) + return len(is) == 0, err +} + +// ExtractAPIVersions takes a collection page, extracts all of the elements, +// and returns them a slice of APIVersion structs. It is effectively a cast. 
+func ExtractAPIVersions(r pagination.Page) ([]APIVersion, error) { + var s struct { + Versions []APIVersion `json:"versions"` + } + err := (r.(APIVersionPage)).ExtractInto(&s) + return s.Versions, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts an API version resource. +func (r GetResult) Extract() (*APIVersion, error) { + var s struct { + Version *APIVersion `json:"version"` + } + err := r.ExtractInto(&s) + return s.Version, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/doc.go new file mode 100644 index 000000000000..12e4bda0f9ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/doc.go @@ -0,0 +1,2 @@ +// apiversions_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/fixtures.go new file mode 100644 index 000000000000..885fdf659a7f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/fixtures.go @@ -0,0 +1,91 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `{ + "versions": [ + { + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "id": "v1.0", + "links": [ + { + "href": "http://23.253.228.211:8776/v1/", + "rel": "self" + } + ] + }, + { + "status": "CURRENT", + "updated": "2012-11-21T11:33:21Z", + "id": "v2.0", + "links": [ + { + "href": "http://23.253.228.211:8776/v2/", + "rel": "self" + } + ] + } + ] + }`) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/v1/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `{ + "version": { + "status": "CURRENT", + "updated": "2012-01-04T11:33:21Z", + "media-types": [ + { + "base": "application/xml", + "type": "application/vnd.openstack.volume+xml;version=1" + }, + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=1" + } + ], + "id": "v1.0", + "links": [ + { + "href": "http://23.253.228.211:8776/v1/", + "rel": "self" + }, + { + "href": "http://jorgew.github.com/block-storage-api/content/os-block-storage-1.0.pdf", + "type": "application/pdf", + "rel": "describedby" + }, + { + "href": "http://docs.rackspacecloud.com/servers/api/v1.1/application.wadl", + "type": "application/vnd.sun.wadl+xml", + "rel": "describedby" + } + ] + } + }`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/requests_test.go new file mode 100644 index 000000000000..31034970cfa6 
--- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/testing/requests_test.go @@ -0,0 +1,64 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + apiversions.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := apiversions.ExtractAPIVersions(page) + th.AssertNoErr(t, err) + + expected := []apiversions.APIVersion{ + { + ID: "v1.0", + Status: "CURRENT", + Updated: "2012-01-04T11:33:21Z", + }, + { + ID: "v2.0", + Status: "CURRENT", + Updated: "2012-11-21T11:33:21Z", + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) +} + +func TestAPIInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + actual, err := apiversions.Get(client.ServiceClient(), "v1").Extract() + th.AssertNoErr(t, err) + + expected := apiversions.APIVersion{ + ID: "v1.0", + Status: "CURRENT", + Updated: "2012-01-04T11:33:21Z", + } + + th.AssertEquals(t, actual.ID, expected.ID) + th.AssertEquals(t, actual.Status, expected.Status) + th.AssertEquals(t, actual.Updated, expected.Updated) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/urls.go new file mode 100644 index 000000000000..d1861ac196d8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions/urls.go @@ -0,0 +1,18 @@ +package apiversions + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +func getURL(c *gophercloud.ServiceClient, version string) string { + return c.ServiceURL(strings.TrimRight(version, "/") + "/") +} + +func listURL(c *gophercloud.ServiceClient) string { + u, _ := url.Parse(c.ServiceURL("")) + u.Path = "/" + return u.String() +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/doc.go new file mode 100644 index 000000000000..198f83077c59 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/doc.go @@ -0,0 +1,5 @@ +// Package snapshots provides information and interaction with snapshots in the +// OpenStack Block Storage service. A snapshot is a point in time copy of the +// data contained in an external storage volume, and can be controlled +// programmatically. +package snapshots diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/requests.go new file mode 100644 index 000000000000..fed1252ac991 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/requests.go @@ -0,0 +1,163 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. 
+type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. +type CreateOpts struct { + VolumeID string `json:"volume_id" required:"true"` + Description string `json:"display_description,omitempty"` + Force bool `json:"force,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + Name string `json:"display_name,omitempty"` +} + +// ToSnapshotCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "snapshot") +} + +// Create will create a new Snapshot based on the values in CreateOpts. To +// extract the Snapshot object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSnapshotCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete will delete the existing Snapshot with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Snapshot with the provided ID. To extract the Snapshot +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSnapshotListQuery() (string, error) +} + +// ListOpts hold options for listing Snapshots. It is passed to the +// snapshots.List function. +type ListOpts struct { + Name string `q:"display_name"` + Status string `q:"status"` + VolumeID string `q:"volume_id"` +} + +// ToSnapshotListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSnapshotListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Snapshots optionally limited by the conditions provided in +// ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSnapshotListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SnapshotPage{pagination.SinglePageBase(r)} + }) +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateMetadataOptsBuilder interface { + ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) +} + +// UpdateMetadataOpts contain options for updating an existing Snapshot. This +// object is passed to the snapshots.Update function. For more information +// about the parameters, see the Snapshot object. +type UpdateMetadataOpts struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of +// an UpdateMetadataOpts. 
+func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// UpdateMetadata will update the Snapshot with provided information. To
+// extract the updated Snapshot from the response, call the ExtractMetadata
+// method on the UpdateMetadataResult.
+func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) {
+	b, err := opts.ToSnapshotUpdateMetadataMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateMetadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a snapshot's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) {
+	count := 0
+	id := ""
+
+	listOpts := ListOpts{
+		Name: name,
+	}
+
+	pages, err := List(client, listOpts).AllPages()
+	if err != nil {
+		return "", err
+	}
+
+	all, err := ExtractSnapshots(pages)
+	if err != nil {
+		return "", err
+	}
+
+	for _, s := range all {
+		if s.Name == name {
+			count++
+			id = s.ID
+		}
+	}
+
+	switch count {
+	case 0:
+		return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"}
+	case 1:
+		return id, nil
+	default:
+		return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "snapshot"}
+	}
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/results.go
new file mode 100644
index 000000000000..528250927351
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/results.go
@@ -0,0 +1,130 @@
+package snapshots
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Snapshot contains all the information associated with an OpenStack Snapshot.
+type Snapshot struct {
+	// Current status of the Snapshot.
+	Status string `json:"status"`
+
+	// Display name.
+	Name string `json:"display_name"`
+
+	// Instances onto which the Snapshot is attached.
+	Attachments []string `json:"attachments"`
+
+	// Logical group.
+	AvailabilityZone string `json:"availability_zone"`
+
+	// Is the Snapshot bootable?
+	Bootable string `json:"bootable"`
+
+	// Date created.
+	CreatedAt time.Time `json:"-"`
+
+	// Display description.
+	Description string `json:"display_description"`
+
+	// See VolumeType object for more information.
+	VolumeType string `json:"volume_type"`
+
+	// ID of the Snapshot from which this Snapshot was created.
+	SnapshotID string `json:"snapshot_id"`
+
+	// ID of the Volume from which this Snapshot was created.
+	VolumeID string `json:"volume_id"`
+
+	// User-defined key-value pairs.
+	Metadata map[string]string `json:"metadata"`
+
+	// Unique identifier.
+	ID string `json:"id"`
+
+	// Size of the Snapshot, in GB.
+	Size int `json:"size"`
+}
+
+func (r *Snapshot) UnmarshalJSON(b []byte) error {
+	type tmp Snapshot
+	var s struct {
+		tmp
+		CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Snapshot(s.tmp)
+
+	r.CreatedAt = time.Time(s.CreatedAt)
+
+	return err
+}
+
+// CreateResult contains the response body and error from a Create request.
+type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// SnapshotPage is a pagination.Pager that is returned from a call to the List function. +type SnapshotPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a SnapshotPage contains no Snapshots. +func (r SnapshotPage) IsEmpty() (bool, error) { + volumes, err := ExtractSnapshots(r) + return len(volumes) == 0, err +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { + var s struct { + Snapshots []Snapshot `json:"snapshots"` + } + err := (r.(SnapshotPage)).ExtractInto(&s) + return s.Snapshots, err +} + +// UpdateMetadataResult contains the response body and error from an UpdateMetadata request. +type UpdateMetadataResult struct { + commonResult +} + +// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. +func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { + if r.Err != nil { + return nil, r.Err + } + m := r.Body.(map[string]interface{})["metadata"] + return m.(map[string]interface{}), nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object out of the commonResult object. +func (r commonResult) Extract() (*Snapshot, error) { + var s struct { + Snapshot *Snapshot `json:"snapshot"` + } + err := r.ExtractInto(&s) + return s.Snapshot, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/doc.go new file mode 100644 index 000000000000..85c45f4078bb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/doc.go @@ -0,0 +1,2 @@ +// snapshots_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/fixtures.go new file mode 100644 index 000000000000..21be6f90a0c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/fixtures.go @@ -0,0 +1,134 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "snapshots": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "display_name": "snapshot-001", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "display_description": "Daily Backup", + "status": "available", + "size": 30, + "created_at": "2012-02-14T20:53:07" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "display_name": "snapshot-002", + "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", + "display_description": "Weekly Backup", + "status": "available", + "size": 25, + "created_at": 
"2012-02-14T20:53:08" + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "snapshot": { + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "display_name": "snapshot-001", + "display_description": "Daily backup", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "created_at": "2012-02-29T03:50:07" + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "snapshot": { + "volume_id": "1234", + "display_name": "snapshot-001" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "snapshot": { + "volume_id": "1234", + "display_name": "snapshot-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "display_description": "Daily backup", + "volume_id": "1234", + "status": "available", + "size": 30, + "created_at": "2012-02-29T03:50:07" + } +} + `) + }) +} + +func MockUpdateMetadataResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/123/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, ` + { + "metadata": { + "key": "v1" + } + } + `) + + fmt.Fprintf(w, ` + { + "metadata": { + "key": "v1" + } + } + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/requests_test.go new file mode 100644 index 000000000000..f4056b5b9ff1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/testing/requests_test.go @@ -0,0 +1,116 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + snapshots.List(client.ServiceClient(), &snapshots.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := snapshots.ExtractSnapshots(page) + if err != nil { + t.Errorf("Failed to extract snapshots: %v", err) + return false, err + } + + expected := []snapshots.Snapshot{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "snapshot-001", + VolumeID: "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + Status: "available", + Size: 30, + CreatedAt: 
time.Date(2012, 2, 14, 20, 53, 7, 0, time.UTC), + Description: "Daily Backup", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "snapshot-002", + VolumeID: "76b8950a-8594-4e5b-8dce-0dfa9c696358", + Status: "available", + Size: 25, + CreatedAt: time.Date(2012, 2, 14, 20, 53, 8, 0, time.UTC), + Description: "Weekly Backup", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err := snapshots.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "snapshot-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := snapshots.CreateOpts{VolumeID: "1234", Name: "snapshot-001"} + n, err := snapshots.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.VolumeID, "1234") + th.AssertEquals(t, n.Name, "snapshot-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestUpdateMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateMetadataResponse(t) + + expected := map[string]interface{}{"key": "v1"} + + options := &snapshots.UpdateMetadataOpts{ + Metadata: map[string]interface{}{ + "key": "v1", + }, + } + + actual, err := snapshots.UpdateMetadata(client.ServiceClient(), "123", options).ExtractMetadata() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, actual, expected) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := snapshots.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/urls.go new file mode 100644 index 000000000000..7780437493c1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/urls.go @@ -0,0 +1,27 @@ +package snapshots + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func metadataURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id, "metadata") +} + +func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { + return metadataURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/util.go new file mode 100644 index 000000000000..40fbb827b8ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/snapshots/util.go @@ -0,0 +1,22 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. 
It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go new file mode 100644 index 000000000000..307b8b12d2f0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go new file mode 100644 index 000000000000..52d738fa9cae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/requests.go @@ -0,0 +1,172 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + Size int `json:"size" required:"true"` + AvailabilityZone string `json:"availability_zone,omitempty"` + Description string `json:"display_description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` + Name string `json:"display_name,omitempty"` + SnapshotID string `json:"snapshot_id,omitempty"` + SourceVolID string `json:"source_volid,omitempty"` + ImageID string `json:"imageRef,omitempty"` + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. 
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the List
+// request.
+type ListOptsBuilder interface {
+	ToVolumeListQuery() (string, error)
+}
+
+// ListOpts holds options for listing Volumes. It is passed to the volumes.List
+// function.
+type ListOpts struct {
+	// admin-only option. Set it to true to see all tenant volumes.
+	AllTenants bool `q:"all_tenants"`
+	// List only volumes that contain Metadata.
+	Metadata map[string]string `q:"metadata"`
+	// List only volumes that have Name as the display name.
+	Name string `q:"display_name"`
+	// List only volumes that have a status of Status.
+	Status string `q:"status"`
+}
+
+// ToVolumeListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToVolumeListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns Volumes optionally limited by the conditions provided in ListOpts.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToVolumeListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return VolumePage{pagination.SinglePageBase(r)}
+	})
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToVolumeUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contain options for updating an existing Volume. This object is passed
+// to the volumes.Update function. For more information about the parameters, see
+// the Volume object.
+type UpdateOpts struct {
+	Name        string            `json:"display_name,omitempty"`
+	Description string            `json:"display_description,omitempty"`
+	Metadata    map[string]string `json:"metadata,omitempty"`
+}
+
+// ToVolumeUpdateMap assembles a request body based on the contents of an
+// UpdateOpts.
+func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume")
+}
+
+// Update will update the Volume with provided information. To extract the updated
+// Volume from the response, call the Extract method on the UpdateResult.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVolumeUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// IDFromName is a convenience function that returns a volume's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go new file mode 100644 index 000000000000..7f68d1486395 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/results.go @@ -0,0 +1,109 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Current status of the volume. + Status string `json:"status"` + // Human-readable display name for the volume. + Name string `json:"display_name"` + // Instances onto which the volume is attached. + Attachments []map[string]interface{} `json:"attachments"` + // This parameter is no longer used. + AvailabilityZone string `json:"availability_zone"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // Human-readable description for the volume. + Description string `json:"display_description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // Unique identifier for the volume. + ID string `json:"id"` + // Size of the volume in GB. + Size int `json:"size"` +} + +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + + return err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// VolumePage is a pagination.pager that is returned from a call to the List function. +type VolumePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a VolumePage contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. 
+func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s struct { + Volumes []Volume `json:"volumes"` + } + err := (r.(VolumePage)).ExtractInto(&s) + return s.Volumes, err +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s struct { + Volume *Volume `json:"volume"` + } + err := r.ExtractInto(&s) + return s.Volume, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go new file mode 100644 index 000000000000..088e43c57fcd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/doc.go @@ -0,0 +1,9 @@ +// volumes_v1 +package testing + +/* +This is package created is to hold fixtures (which imports testing), +so that importing volumes package does not inadvertently import testing into production code +More information here: +https://github.com/rackspace/gophercloud/issues/473 +*/ diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go new file mode 100644 index 000000000000..306901b048b8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/fixtures.go @@ -0,0 +1,127 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "volumes": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "display_name": "vol-001" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "display_name": "vol-002" + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "volume": { + "id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "display_name": "vol-001", + "display_description": "Another volume.", + "status": "active", + "size": 30, + "volume_type": "289da7f8-6440-407c-9fb4-7db01ec49164", + "metadata": { + "contents": "junk" + }, + "availability_zone": "us-east1", + "bootable": "false", + "snapshot_id": null, + "attachments": [ + { + "attachment_id": "03987cd1-0ad5-40d1-9b2a-7cc48295d4fa", + "id": "47e9ecc5-4045-4ee3-9a4b-d859d546a0cf", + "volume_id": "6c80f8ac-e3e2-480c-8e6e-f1db92fe4bfe", + "server_id": "d1c4788b-9435-42e2-9b81-29f3be1cd01f", + "host_name": "mitaka", + "device": "/" + } + ], + "created_at": "2012-02-14T20:53:07" + } + } + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + 
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume": { + "size": 75, + "availability_zone": "us-east1" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "volume": { + "size": 4, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func MockUpdateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "volume": { + "display_name": "vol-002", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } + } + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/requests_test.go new file mode 100644 index 000000000000..c4ce23a75a8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/testing/requests_test.go @@ -0,0 +1,152 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + volumes.List(client.ServiceClient(), &volumes.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := volumes.ExtractVolumes(page) + if err != nil { + t.Errorf("Failed to extract volumes: %v", err) + return false, err + } + + expected := []volumes.Volume{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestListAll(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := volumes.List(client.ServiceClient(), &volumes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := volumes.ExtractVolumes(allPages) + th.AssertNoErr(t, err) + + expected := []volumes.Volume{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + }, + } + + th.CheckDeepEquals(t, expected, actual) + +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + actual, err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + expected := &volumes.Volume{ + Status: "active", + Name: "vol-001", + Attachments: []map[string]interface{}{ + { + "attachment_id": "03987cd1-0ad5-40d1-9b2a-7cc48295d4fa", + "id": "47e9ecc5-4045-4ee3-9a4b-d859d546a0cf", + 
"volume_id": "6c80f8ac-e3e2-480c-8e6e-f1db92fe4bfe", + "server_id": "d1c4788b-9435-42e2-9b81-29f3be1cd01f", + "host_name": "mitaka", + "device": "/", + }, + }, + AvailabilityZone: "us-east1", + Bootable: "false", + CreatedAt: time.Date(2012, 2, 14, 20, 53, 07, 0, time.UTC), + Description: "Another volume.", + VolumeType: "289da7f8-6440-407c-9fb4-7db01ec49164", + SnapshotID: "", + SourceVolID: "", + Metadata: map[string]string{ + "contents": "junk", + }, + ID: "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + Size: 30, + } + + th.AssertDeepEquals(t, expected, actual) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := &volumes.CreateOpts{ + Size: 75, + AvailabilityZone: "us-east1", + } + n, err := volumes.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Size, 4) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := volumes.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateResponse(t) + + options := volumes.UpdateOpts{Name: "vol-002"} + v, err := volumes.Update(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract() + th.AssertNoErr(t, err) + th.CheckEquals(t, "vol-002", v.Name) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go new file mode 100644 index 000000000000..8a00f97e98c4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go new file mode 100644 index 000000000000..e86c1b4b4ee5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. 
+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error {
+	return gophercloud.WaitFor(secs, func() (bool, error) {
+		current, err := Get(c, id).Extract()
+		if err != nil {
+			return false, err
+		}
+
+		if current.Status == status {
+			return true, nil
+		}
+
+		return false, nil
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go
new file mode 100644
index 000000000000..793084f89b20
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/doc.go
@@ -0,0 +1,9 @@
+// Package volumetypes provides information and interaction with volume types
+// in the OpenStack Block Storage service. A volume type indicates the type of
+// a block storage volume, such as SATA, SCSI, SSD, etc. These can be
+// customized or defined by the OpenStack admin.
+//
+// You can also define extra_specs associated with your volume types. For
+// instance, you could have a VolumeType=SATA, with extra_specs (RPM=10000,
+// RAID-Level=5). Extra_specs are defined and customized by the admin.
+package volumetypes
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go
new file mode 100644
index 000000000000..b95c09addd2c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/requests.go
@@ -0,0 +1,59 @@
+package volumetypes
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToVolumeTypeCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts are options for creating a volume type.
+type CreateOpts struct {
+	// See VolumeType.
+	ExtraSpecs map[string]interface{} `json:"extra_specs,omitempty"`
+	// See VolumeType.
+	Name string `json:"name,omitempty"`
+}
+
+// ToVolumeTypeCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToVolumeTypeCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "volume_type")
+}
+
+// Create will create a new volume type. To extract the created volume type object,
+// call the Extract method on the CreateResult.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToVolumeTypeCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	return
+}
+
+// Delete will delete the volume type with the provided ID.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get will retrieve the volume type with the provided ID. To extract the volume
+// type from the result, call the Extract method on the GetResult.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// List returns all volume types.
+func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return VolumeTypePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/results.go new file mode 100644 index 000000000000..2c312385c651 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/results.go @@ -0,0 +1,61 @@ +package volumetypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// VolumeType contains all information associated with an OpenStack Volume Type. +type VolumeType struct { + ExtraSpecs map[string]interface{} `json:"extra_specs"` // user-defined metadata + ID string `json:"id"` // unique identifier + Name string `json:"name"` // display name +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// VolumeTypePage is a pagination.Pager that is returned from a call to the List function. +type VolumeTypePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a VolumeTypePage contains no Volume Types. +func (r VolumeTypePage) IsEmpty() (bool, error) { + volumeTypes, err := ExtractVolumeTypes(r) + return len(volumeTypes) == 0, err +} + +// ExtractVolumeTypes extracts and returns Volume Types. +func ExtractVolumeTypes(r pagination.Page) ([]VolumeType, error) { + var s struct { + VolumeTypes []VolumeType `json:"volume_types"` + } + err := (r.(VolumeTypePage)).ExtractInto(&s) + return s.VolumeTypes, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume Type object out of the commonResult object. 
+func (r commonResult) Extract() (*VolumeType, error) { + var s struct { + VolumeType *VolumeType `json:"volume_type"` + } + err := r.ExtractInto(&s) + return s.VolumeType, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/doc.go new file mode 100644 index 000000000000..73834ed73133 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/doc.go @@ -0,0 +1,2 @@ +// volumetypes_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/fixtures.go new file mode 100644 index 000000000000..0e2715a14fba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/fixtures.go @@ -0,0 +1,60 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "volume_types": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "name": "vol-type-001", + "extra_specs": { + "capabilities": "gpu" + } + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "name": "vol-type-002", + "extra_specs": {} + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume_type": { + "name": "vol-type-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "extra_specs": { + "serverNumber": "2" + } + } +} + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/requests_test.go new file mode 100644 index 000000000000..42446151b3f5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/testing/requests_test.go @@ -0,0 +1,119 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + volumetypes.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := volumetypes.ExtractVolumeTypes(page) + if err != nil { + t.Errorf("Failed to extract volume types: %v", err) + return false, err + } + + expected := []volumetypes.VolumeType{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-type-001", + ExtraSpecs: map[string]interface{}{ + "capabilities": "gpu", 
+ }, + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-type-002", + ExtraSpecs: map[string]interface{}{}, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + vt, err := volumetypes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, vt.ExtraSpecs, map[string]interface{}{"serverNumber": "2"}) + th.AssertEquals(t, vt.Name, "vol-type-001") + th.AssertEquals(t, vt.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume_type": { + "name": "vol-type-001" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "volume_type": { + "name": "vol-type-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +} + `) + }) + + options := &volumetypes.CreateOpts{Name: "vol-type-001"} + n, err := volumetypes.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "vol-type-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.WriteHeader(http.StatusAccepted) + }) + + err := volumetypes.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go new file mode 100644 index 000000000000..822c7dd8916a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes/urls.go @@ -0,0 +1,19 @@ +package volumetypes + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("types") +} + +func createURL(c *gophercloud.ServiceClient) string { + return listURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go new file mode 100644 index 000000000000..198f83077c59 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/doc.go @@ -0,0 +1,5 @@ +// Package snapshots provides information and interaction with snapshots in the +// OpenStack Block Storage service. A snapshot is a point in time copy of the +// data contained in an external storage volume, and can be controlled +// programmatically. 
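A short sketch, not part of the vendored files, of walking the v1 volume type pager with EachPage and ExtractVolumeTypes. The package and helper names are made up for illustration; the service client is assumed to be authenticated already.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumetypes"
	"github.com/gophercloud/gophercloud/pagination"
)

// printVolumeTypes walks every page returned by List and prints the ID and
// name of each volume type.
func printVolumeTypes(client *gophercloud.ServiceClient) error {
	return volumetypes.List(client).EachPage(func(page pagination.Page) (bool, error) {
		types, err := volumetypes.ExtractVolumeTypes(page)
		if err != nil {
			return false, err
		}
		for _, vt := range types {
			fmt.Printf("%s\t%s\n", vt.ID, vt.Name)
		}
		// Returning true moves on to the next page, if there is one.
		return true, nil
	})
}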
+package snapshots diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go new file mode 100644 index 000000000000..939e502048fa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/requests.go @@ -0,0 +1,175 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. +type CreateOpts struct { + VolumeID string `json:"volume_id" required:"true"` + Force bool `json:"force,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToSnapshotCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "snapshot") +} + +// Create will create a new Snapshot based on the values in CreateOpts. To +// extract the Snapshot object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSnapshotCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Delete will delete the existing Snapshot with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Snapshot with the provided ID. To extract the Snapshot +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSnapshotListQuery() (string, error) +} + +// ListOpts hold options for listing Snapshots. It is passed to the +// snapshots.List function. +type ListOpts struct { + // AllTenants will retrieve snapshots of all tenants/projects. + AllTenants bool `q:"all_tenants"` + + // Name will filter by the specified snapshot name. + Name string `q:"name"` + + // Status will filter by the specified status. + Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required to use this. + TenantID string `q:"project_id"` + + // VolumeID will filter by a specified volume ID. + VolumeID string `q:"volume_id"` +} + +// ToSnapshotListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSnapshotListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Snapshots optionally limited by the conditions provided in +// ListOpts. 
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSnapshotListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SnapshotPage{pagination.SinglePageBase(r)} + }) +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateMetadataOptsBuilder interface { + ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) +} + +// UpdateMetadataOpts contain options for updating an existing Snapshot. This +// object is passed to the snapshots.Update function. For more information +// about the parameters, see the Snapshot object. +type UpdateMetadataOpts struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of +// an UpdateMetadataOpts. +func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// UpdateMetadata will update the Snapshot with provided information. To +// extract the updated Snapshot from the response, call the ExtractMetadata +// method on the UpdateMetadataResult. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { + b, err := opts.ToSnapshotUpdateMetadataMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateMetadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convenience function that returns a snapshot's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSnapshots(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "snapshot"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go new file mode 100644 index 000000000000..0b444d08ad5a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/results.go @@ -0,0 +1,120 @@ +package snapshots + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Snapshot contains all the information associated with a Cinder Snapshot. +type Snapshot struct { + // Unique identifier. + ID string `json:"id"` + + // Date created. + CreatedAt time.Time `json:"-"` + + // Date updated. + UpdatedAt time.Time `json:"-"` + + // Display name. + Name string `json:"name"` + + // Display description. + Description string `json:"description"` + + // ID of the Volume from which this Snapshot was created. + VolumeID string `json:"volume_id"` + + // Current status of the Snapshot.
+ Status string `json:"status"` + + // Size of the Snapshot, in GB. + Size int `json:"size"` + + // User-defined key-value pairs. + Metadata map[string]string `json:"metadata"` +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// SnapshotPage is a pagination.Pager that is returned from a call to the List function. +type SnapshotPage struct { + pagination.SinglePageBase +} + +func (r *Snapshot) UnmarshalJSON(b []byte) error { + type tmp Snapshot + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Snapshot(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// IsEmpty returns true if a SnapshotPage contains no Snapshots. +func (r SnapshotPage) IsEmpty() (bool, error) { + volumes, err := ExtractSnapshots(r) + return len(volumes) == 0, err +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { + var s struct { + Snapshots []Snapshot `json:"snapshots"` + } + err := (r.(SnapshotPage)).ExtractInto(&s) + return s.Snapshots, err +} + +// UpdateMetadataResult contains the response body and error from an UpdateMetadata request. +type UpdateMetadataResult struct { + commonResult +} + +// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. +func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { + if r.Err != nil { + return nil, r.Err + } + m := r.Body.(map[string]interface{})["metadata"] + return m.(map[string]interface{}), nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object out of the commonResult object. 
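An illustrative sketch, not part of the vendored files, of combining the v2 snapshots IDFromName helper with Get. The helper name snapshotByName is an assumption; IDFromName itself returns ErrResourceNotFound or ErrMultipleResourcesFound when the name does not resolve to exactly one snapshot, so the caller only has to handle a single error path.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots"
)

// snapshotByName resolves a snapshot name to its ID and then fetches the full
// Snapshot record.
func snapshotByName(client *gophercloud.ServiceClient, name string) (*snapshots.Snapshot, error) {
	id, err := snapshots.IDFromName(client, name)
	if err != nil {
		return nil, err
	}

	snap, err := snapshots.Get(client, id).Extract()
	if err != nil {
		return nil, err
	}

	fmt.Printf("snapshot %s is %s (%d GB)\n", snap.Name, snap.Status, snap.Size)
	return snap, nil
}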
+func (r commonResult) Extract() (*Snapshot, error) { + var s struct { + Snapshot *Snapshot `json:"snapshot"` + } + err := r.ExtractInto(&s) + return s.Snapshot, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/doc.go new file mode 100644 index 000000000000..9702a25fb2e4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/doc.go @@ -0,0 +1,2 @@ +// snapshots_v2 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/fixtures.go new file mode 100644 index 000000000000..9638fa500710 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/fixtures.go @@ -0,0 +1,134 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "snapshots": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "name": "snapshot-001", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "description": "Daily Backup", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "name": "snapshot-002", + "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", + "description": "Weekly Backup", + "status": "available", + "size": 25, + "created_at": "2017-05-30T03:35:03.000000" + } + ] + } + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "snapshot": { + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "name": "snapshot-001", + "description": "Daily backup", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "snapshot": { + "volume_id": "1234", + "name": "snapshot-001" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "snapshot": { + "volume_id": "1234", + "name": "snapshot-001", + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "description": "Daily backup", + "volume_id": "1234", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + } +} + `) + }) +} + +func MockUpdateMetadataResponse(t *testing.T) { + 
th.Mux.HandleFunc("/snapshots/123/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, ` + { + "metadata": { + "key": "v1" + } + } + `) + + fmt.Fprintf(w, ` + { + "metadata": { + "key": "v1" + } + } + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/requests_test.go new file mode 100644 index 000000000000..1c44e52c7ed9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/testing/requests_test.go @@ -0,0 +1,116 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + snapshots.List(client.ServiceClient(), &snapshots.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := snapshots.ExtractSnapshots(page) + if err != nil { + t.Errorf("Failed to extract snapshots: %v", err) + return false, err + } + + expected := []snapshots.Snapshot{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "snapshot-001", + VolumeID: "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + Status: "available", + Size: 30, + CreatedAt: time.Date(2017, 5, 30, 3, 35, 3, 0, time.UTC), + Description: "Daily Backup", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "snapshot-002", + VolumeID: "76b8950a-8594-4e5b-8dce-0dfa9c696358", + Status: "available", + Size: 25, + CreatedAt: time.Date(2017, 5, 30, 3, 35, 3, 0, time.UTC), + Description: "Weekly Backup", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err := snapshots.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "snapshot-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := snapshots.CreateOpts{VolumeID: "1234", Name: "snapshot-001"} + n, err := snapshots.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.VolumeID, "1234") + th.AssertEquals(t, n.Name, "snapshot-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestUpdateMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateMetadataResponse(t) + + expected := map[string]interface{}{"key": "v1"} + + options := &snapshots.UpdateMetadataOpts{ + Metadata: map[string]interface{}{ + "key": "v1", + }, + } + + actual, err := 
snapshots.UpdateMetadata(client.ServiceClient(), "123", options).ExtractMetadata() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, actual, expected) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := snapshots.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go new file mode 100644 index 000000000000..7780437493c1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/urls.go @@ -0,0 +1,27 @@ +package snapshots + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func metadataURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id, "metadata") +} + +func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { + return metadataURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go new file mode 100644 index 000000000000..40fbb827b8ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/snapshots/util.go @@ -0,0 +1,22 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go new file mode 100644 index 000000000000..307b8b12d2f0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. +package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go new file mode 100644 index 000000000000..b7977cbc8f11 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/requests.go @@ -0,0 +1,207 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. 
+type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + // The size of the volume, in GB + Size int `json:"size" required:"true"` + // The availability zone + AvailabilityZone string `json:"availability_zone,omitempty"` + // ConsistencyGroupID is the ID of a consistency group + ConsistencyGroupID string `json:"consistencygroup_id,omitempty"` + // The volume description + Description string `json:"description,omitempty"` + // One or more metadata key and value pairs to associate with the volume + Metadata map[string]string `json:"metadata,omitempty"` + // The volume name + Name string `json:"name,omitempty"` + // the ID of the existing volume snapshot + SnapshotID string `json:"snapshot_id,omitempty"` + // SourceReplica is a UUID of an existing volume to replicate with + SourceReplica string `json:"source_replica,omitempty"` + // the ID of the existing volume + SourceVolID string `json:"source_volid,omitempty"` + // The ID of the image from which you want to create the volume. + // Required to create a bootable volume. + ImageID string `json:"imageRef,omitempty"` + // The associated volume type + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeListQuery() (string, error) +} + +// ListOpts holds options for listing Volumes. It is passed to the volumes.List +// function. +type ListOpts struct { + // AllTenants will retrieve volumes of all tenants/projects. + AllTenants bool `q:"all_tenants"` + + // Metadata will filter results based on specified metadata. + Metadata map[string]string `q:"metadata"` + + // Name will filter by the specified volume name. + Name string `q:"name"` + + // Status will filter by the specified status. + Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required for this. + TenantID string `q:"project_id"` + + // Comma-separated list of sort keys and optional sort directions in the + // form of [:]. 
+ Sort string `q:"sort"` + + // Requests a page size of items. + Limit int `q:"limit"` + + // Used in conjunction with limit to return a slice of items. + Offset int `q:"offset"` + + // The ID of the last-seen item. + Marker string `q:"marker"` +} + +// ToVolumeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToVolumeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VolumePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVolumeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Update will update the Volume with provided information. To extract the updated +// Volume from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToVolumeUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convenience function that returns a volume's ID given its name.
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go new file mode 100644 index 000000000000..96572b01b458 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/results.go @@ -0,0 +1,167 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type Attachment struct { + AttachedAt time.Time `json:"-"` + AttachmentID string `json:"attachment_id"` + Device string `json:"device"` + HostName string `json:"host_name"` + ID string `json:"id"` + ServerID string `json:"server_id"` + VolumeID string `json:"volume_id"` +} + +func (r *Attachment) UnmarshalJSON(b []byte) error { + type tmp Attachment + var s struct { + tmp + AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Attachment(s.tmp) + + r.AttachedAt = time.Time(s.AttachedAt) + + return err +} + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Unique identifier for the volume. + ID string `json:"id"` + // Current status of the volume. + Status string `json:"status"` + // Size of the volume in GB. + Size int `json:"size"` + // AvailabilityZone is which availability zone the volume is in. + AvailabilityZone string `json:"availability_zone"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // The date when this volume was last updated + UpdatedAt time.Time `json:"-"` + // Instances onto which the volume is attached. + Attachments []Attachment `json:"attachments"` + // Human-readable display name for the volume. + Name string `json:"name"` + // Human-readable description for the volume. + Description string `json:"description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // UserID is the id of the user who created the volume. + UserID string `json:"user_id"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // Encrypted denotes if the volume is encrypted. + Encrypted bool `json:"encrypted"` + // ReplicationStatus is the status of replication. + ReplicationStatus string `json:"replication_status"` + // ConsistencyGroupID is the consistency group ID. 
+ ConsistencyGroupID string `json:"consistencygroup_id"` + // Multiattach denotes if the volume is multi-attach capable. + Multiattach bool `json:"multiattach"` +} + +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// VolumePage is a pagination.pager that is returned from a call to the List function. +type VolumePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a ListResult contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r VolumePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"volumes_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s []Volume + err := ExtractVolumesInto(r, &s) + return s, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s Volume + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "volume") +} + +func ExtractVolumesInto(r pagination.Page, v interface{}) error { + return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. 
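A sketch, not part of the vendored files, of filtering v2 volumes by status with ListOpts and then flattening the pager with AllPages and ExtractVolumes, the same pattern the vendored tests below use. The helper name availableVolumes and the "available" filter value are illustrative.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
)

// availableVolumes lists every volume whose status is "available" by
// flattening the pager with AllPages and decoding it with ExtractVolumes.
func availableVolumes(client *gophercloud.ServiceClient) ([]volumes.Volume, error) {
	pages, err := volumes.List(client, &volumes.ListOpts{Status: "available"}).AllPages()
	if err != nil {
		return nil, err
	}

	vols, err := volumes.ExtractVolumes(pages)
	if err != nil {
		return nil, err
	}

	for _, v := range vols {
		fmt.Printf("%s\t%s\t%d GB\n", v.ID, v.Name, v.Size)
	}
	return vols, nil
}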
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/doc.go new file mode 100644 index 000000000000..aa8351ab15b2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/doc.go @@ -0,0 +1,2 @@ +// volumes_v2 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/fixtures.go new file mode 100644 index 000000000000..44d2ca383c36 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/fixtures.go @@ -0,0 +1,203 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "volumes": [ + { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:35:03.000000", + "bootable": "false", + "name": "vol-001", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "os-vol-host-attr:host": null, + "availability_zone": "nova", + "attachments": [{ + "server_id": "83ec2e3b-4321-422b-8706-a84185f52a0a", + "attachment_id": "05551600-a936-4d4a-ba42-79a037c1-c91a", + "attached_at": "2016-08-06T14:48:20.000000", + "host_name": "foobar", + "volume_id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + "device": "/dev/vdc", + "id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75" + }], + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {"foo": "bar"}, + "status": "available", + "description": null + }, + { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:32:29.000000", + "bootable": "false", + "name": "vol-002", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "os-vol-host-attr:host": null, + "availability_zone": "nova", + "attachments": [], + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {}, + "status": "available", + "description": null + } + ] +} + `) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", 
fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume": { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:32:29.000000", + "bootable": "false", + "name": "vol-001", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "os-vol-host-attr:host": null, + "availability_zone": "nova", + "attachments": [{ + "server_id": "83ec2e3b-4321-422b-8706-a84185f52a0a", + "attachment_id": "05551600-a936-4d4a-ba42-79a037c1-c91a", + "attached_at": "2016-08-06T14:48:20.000000", + "host_name": "foobar", + "volume_id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + "device": "/dev/vdc", + "id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75" + }], + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {}, + "status": "available", + "description": null + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume": { + "name": "vol-001", + "size": 75 + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "volume": { + "size": 75, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "metadata": {}, + "created_at": "2015-09-17T03:32:29.044216", + "encrypted": false, + "bootable": "false", + "availability_zone": "nova", + "attachments": [], + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "status": "creating", + "description": null, + "volume_type": "lvmdriver-1", + "name": "vol-001", + "replication_status": "disabled", + "consistencygroup_id": null, + "source_volid": null, + "snapshot_id": null, + "multiattach": false + } +} + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockUpdateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume": { + "name": "vol-002" + } +} + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/requests_test.go new file mode 100644 index 000000000000..cd7dcf5ecb76 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/testing/requests_test.go @@ -0,0 +1,257 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants" + 
"github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + volumes.List(client.ServiceClient(), &volumes.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := volumes.ExtractVolumes(page) + if err != nil { + t.Errorf("Failed to extract volumes: %v", err) + return false, err + } + + expected := []volumes.Volume{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + Attachments: []volumes.Attachment{{ + ServerID: "83ec2e3b-4321-422b-8706-a84185f52a0a", + AttachmentID: "05551600-a936-4d4a-ba42-79a037c1-c91a", + AttachedAt: time.Date(2016, 8, 6, 14, 48, 20, 0, time.UTC), + HostName: "foobar", + VolumeID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + Device: "/dev/vdc", + ID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + }}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 35, 3, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{"foo": "bar"}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + Attachments: []volumes.Attachment{}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 32, 29, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestListAllWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + type VolumeWithExt struct { + volumes.Volume + volumetenants.VolumeTenantExt + } + + allPages, err := volumes.List(client.ServiceClient(), &volumes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + var actual []VolumeWithExt + err = volumes.ExtractVolumesInto(allPages, &actual) + th.AssertNoErr(t, err) + th.AssertEquals(t, 2, len(actual)) + th.AssertEquals(t, "304dc00909ac4d0da6c62d816bcb3459", actual[0].TenantID) +} + +func TestListAll(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := volumes.List(client.ServiceClient(), &volumes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := volumes.ExtractVolumes(allPages) + th.AssertNoErr(t, err) + + expected := []volumes.Volume{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + Attachments: []volumes.Attachment{{ + ServerID: "83ec2e3b-4321-422b-8706-a84185f52a0a", + AttachmentID: "05551600-a936-4d4a-ba42-79a037c1-c91a", + AttachedAt: time.Date(2016, 8, 6, 14, 48, 20, 0, 
time.UTC), + HostName: "foobar", + VolumeID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + Device: "/dev/vdc", + ID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + }}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 35, 3, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{"foo": "bar"}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + Attachments: []volumes.Attachment{}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 32, 29, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + } + + th.CheckDeepEquals(t, expected, actual) + +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "vol-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := &volumes.CreateOpts{Size: 75, Name: "vol-001"} + n, err := volumes.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Size, 75) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := volumes.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateResponse(t) + + options := volumes.UpdateOpts{Name: "vol-002"} + v, err := volumes.Update(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract() + th.AssertNoErr(t, err) + th.CheckEquals(t, "vol-002", v.Name) +} + +func TestGetWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + var s struct { + volumes.Volume + volumetenants.VolumeTenantExt + } + err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(&s) + th.AssertNoErr(t, err) + th.AssertEquals(t, "304dc00909ac4d0da6c62d816bcb3459", s.TenantID) + + err = volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(s) + if err == nil { + t.Errorf("Expected error when providing non-pointer struct") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go new file mode 100644 index 000000000000..170724905abe --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go new file mode 100644 index 000000000000..e86c1b4b4ee5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/doc.go new file mode 100644 index 000000000000..198f83077c59 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/doc.go @@ -0,0 +1,5 @@ +// Package snapshots provides information and interaction with snapshots in the +// OpenStack Block Storage service. A snapshot is a point in time copy of the +// data contained in an external storage volume, and can be controlled +// programmatically. +package snapshots diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/requests.go new file mode 100644 index 000000000000..22531c9fdfcf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/requests.go @@ -0,0 +1,186 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSnapshotCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Snapshot. This object is passed to +// the snapshots.Create function. For more information about these parameters, +// see the Snapshot object. +type CreateOpts struct { + VolumeID string `json:"volume_id" required:"true"` + Force bool `json:"force,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToSnapshotCreateMap assembles a request body based on the contents of a +// CreateOpts. 
+func (opts CreateOpts) ToSnapshotCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "snapshot") +} + +// Create will create a new Snapshot based on the values in CreateOpts. To +// extract the Snapshot object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSnapshotCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Delete will delete the existing Snapshot with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Snapshot with the provided ID. To extract the Snapshot +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSnapshotListQuery() (string, error) +} + +type ListOpts struct { + // AllTenants will retrieve snapshots of all tenants/projects. + AllTenants bool `q:"all_tenants"` + + // Name will filter by the specified snapshot name. + Name string `q:"name"` + + // Status will filter by the specified status. + Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required to use this. + TenantID string `q:"project_id"` + + // VolumeID will filter by a specified volume ID. + VolumeID string `q:"volume_id"` + + // Comma-separated list of sort keys and optional sort directions in the + // form of [:]. + Sort string `q:"sort"` + + // Requests a page size of items. + Limit int `q:"limit"` + + // Used in conjunction with limit to return a slice of items. + Offset int `q:"offset"` + + // The ID of the last-seen item. + Marker string `q:"marker"` +} + +// ToSnapshotListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSnapshotListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Snapshots optionally limited by the conditions provided in +// ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSnapshotListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SnapshotPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateMetadataOptsBuilder interface { + ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) +} + +// UpdateMetadataOpts contain options for updating an existing Snapshot. This +// object is passed to the snapshots.Update function. For more information +// about the parameters, see the Snapshot object. +type UpdateMetadataOpts struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// ToSnapshotUpdateMetadataMap assembles a request body based on the contents of +// an UpdateMetadataOpts. 
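A hedged sketch of the metadata update flow defined just below: UpdateMetadata sends the map carried by UpdateMetadataOpts and the resulting metadata is read back with ExtractMetadata. The snapshot ID and key/value pair are placeholders.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
)

// setSnapshotMetadata updates the metadata on an existing snapshot and
// prints the map returned by the service.
func setSnapshotMetadata(client *gophercloud.ServiceClient, snapshotID string) error {
	opts := snapshots.UpdateMetadataOpts{
		Metadata: map[string]interface{}{"key": "v1"},
	}
	md, err := snapshots.UpdateMetadata(client, snapshotID, opts).ExtractMetadata()
	if err != nil {
		return err
	}
	fmt.Println("metadata now:", md)
	return nil
}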
+func (opts UpdateMetadataOpts) ToSnapshotUpdateMetadataMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// UpdateMetadata will update the Snapshot with provided information. To +// extract the updated Snapshot from the response, call the ExtractMetadata +// method on the UpdateMetadataResult. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { + b, err := opts.ToSnapshotUpdateMetadataMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateMetadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convienience function that returns a snapshot's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSnapshots(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "snapshot"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "snapshot"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/results.go new file mode 100644 index 000000000000..1ffae8f61f42 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/results.go @@ -0,0 +1,132 @@ +package snapshots + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Snapshot contains all the information associated with a Cinder Snapshot. +type Snapshot struct { + // Unique identifier. + ID string `json:"id"` + + // Date created. + CreatedAt time.Time `json:"-"` + + // Date updated. + UpdatedAt time.Time `json:"-"` + + // Display name. + Name string `json:"name"` + + // Display description. + Description string `json:"description"` + + // ID of the Volume from which this Snapshot was created. + VolumeID string `json:"volume_id"` + + // Currect status of the Snapshot. + Status string `json:"status"` + + // Size of the Snapshot, in GB. + Size int `json:"size"` + + // User-defined key-value pairs. + Metadata map[string]string `json:"metadata"` +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// SnapshotPage is a pagination.Pager that is returned from a call to the List function. 
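The pager returned by List yields one SnapshotPage per response; a short sketch of walking it with EachPage and ExtractSnapshots follows. The "available" status filter is only an illustrative choice.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
	"github.com/gophercloud/gophercloud/pagination"
)

// listAvailableSnapshots prints every snapshot from every page of the
// listing; returning true from the callback advances to the next page.
func listAvailableSnapshots(client *gophercloud.ServiceClient) error {
	return snapshots.List(client, snapshots.ListOpts{Status: "available"}).EachPage(
		func(page pagination.Page) (bool, error) {
			snaps, err := snapshots.ExtractSnapshots(page)
			if err != nil {
				return false, err
			}
			for _, s := range snaps {
				fmt.Printf("%s %s (%d GB)\n", s.ID, s.Name, s.Size)
			}
			return true, nil
		})
}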
+type SnapshotPage struct { + pagination.LinkedPageBase +} + +// UnmarshalJSON converts our JSON API response into our snapshot struct +func (r *Snapshot) UnmarshalJSON(b []byte) error { + type tmp Snapshot + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Snapshot(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// IsEmpty returns true if a SnapshotPage contains no Snapshots. +func (r SnapshotPage) IsEmpty() (bool, error) { + volumes, err := ExtractSnapshots(r) + return len(volumes) == 0, err +} + +func (page SnapshotPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"snapshots_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractSnapshots extracts and returns Snapshots. It is used while iterating over a snapshots.List call. +func ExtractSnapshots(r pagination.Page) ([]Snapshot, error) { + var s struct { + Snapshots []Snapshot `json:"snapshots"` + } + err := (r.(SnapshotPage)).ExtractInto(&s) + return s.Snapshots, err +} + +// UpdateMetadataResult contains the response body and error from an UpdateMetadata request. +type UpdateMetadataResult struct { + commonResult +} + +// ExtractMetadata returns the metadata from a response from snapshots.UpdateMetadata. +func (r UpdateMetadataResult) ExtractMetadata() (map[string]interface{}, error) { + if r.Err != nil { + return nil, r.Err + } + m := r.Body.(map[string]interface{})["metadata"] + return m.(map[string]interface{}), nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Snapshot object out of the commonResult object. 
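Get plus Extract is the usual way to read a single snapshot; combined with WaitForStatus from this package's util.go it covers the common "create, wait, inspect" pattern. The ID and the 60-second timeout below are placeholders.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots"
)

// inspectSnapshot waits up to 60 seconds for the snapshot to reach the
// "available" status, then fetches it and extracts the Snapshot object.
func inspectSnapshot(client *gophercloud.ServiceClient, id string) error {
	if err := snapshots.WaitForStatus(client, id, "available", 60); err != nil {
		return err
	}
	snap, err := snapshots.Get(client, id).Extract()
	if err != nil {
		return err
	}
	fmt.Println(snap.Name, "created at", snap.CreatedAt)
	return nil
}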
+func (r commonResult) Extract() (*Snapshot, error) { + var s struct { + Snapshot *Snapshot `json:"snapshot"` + } + err := r.ExtractInto(&s) + return s.Snapshot, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/doc.go new file mode 100644 index 000000000000..3de6274b32ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/doc.go @@ -0,0 +1,2 @@ +// snapshots_v3 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/fixtures.go new file mode 100644 index 000000000000..8255207cc281 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/fixtures.go @@ -0,0 +1,148 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "snapshots": [ + { + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "name": "snapshot-001", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "description": "Daily Backup", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + }, + { + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "name": "snapshot-002", + "volume_id": "76b8950a-8594-4e5b-8dce-0dfa9c696358", + "description": "Weekly Backup", + "status": "available", + "size": 25, + "created_at": "2017-05-30T03:35:03.000000" + } + ], + "snapshots_links": [ + { + "href": "%s/snapshots?marker=1", + "rel": "next" + }] + } + `, th.Server.URL) + case "1": + fmt.Fprintf(w, `{"snapshots": []}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "snapshot": { + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "name": "snapshot-001", + "description": "Daily backup", + "volume_id": "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "snapshot": { + "volume_id": "1234", + "name": "snapshot-001" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "snapshot": { + "volume_id": "1234", + "name": "snapshot-001", 
+ "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "description": "Daily backup", + "volume_id": "1234", + "status": "available", + "size": 30, + "created_at": "2017-05-30T03:35:03.000000" + } +} + `) + }) +} + +func MockUpdateMetadataResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/123/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, ` + { + "metadata": { + "key": "v1" + } + } + `) + + fmt.Fprintf(w, ` + { + "metadata": { + "key": "v1" + } + } + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/snapshots/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/requests_test.go new file mode 100644 index 000000000000..51231e3017ee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/testing/requests_test.go @@ -0,0 +1,116 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + snapshots.List(client.ServiceClient(), &snapshots.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := snapshots.ExtractSnapshots(page) + if err != nil { + t.Errorf("Failed to extract snapshots: %v", err) + return false, err + } + + expected := []snapshots.Snapshot{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "snapshot-001", + VolumeID: "521752a6-acf6-4b2d-bc7a-119f9148cd8c", + Status: "available", + Size: 30, + CreatedAt: time.Date(2017, 5, 30, 3, 35, 3, 0, time.UTC), + Description: "Daily Backup", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "snapshot-002", + VolumeID: "76b8950a-8594-4e5b-8dce-0dfa9c696358", + Status: "available", + Size: 25, + CreatedAt: time.Date(2017, 5, 30, 3, 35, 3, 0, time.UTC), + Description: "Weekly Backup", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err := snapshots.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "snapshot-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := snapshots.CreateOpts{VolumeID: "1234", Name: "snapshot-001"} + n, err := snapshots.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.VolumeID, "1234") + th.AssertEquals(t, n.Name, "snapshot-001") + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestUpdateMetadata(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateMetadataResponse(t) + + expected := map[string]interface{}{"key": "v1"} + + options := &snapshots.UpdateMetadataOpts{ + Metadata: map[string]interface{}{ + "key": "v1", + }, + } + + actual, err := snapshots.UpdateMetadata(client.ServiceClient(), "123", options).ExtractMetadata() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, actual, expected) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := snapshots.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/urls.go new file mode 100644 index 000000000000..7780437493c1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/urls.go @@ -0,0 +1,27 @@ +package snapshots + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("snapshots") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func metadataURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("snapshots", id, "metadata") +} + +func updateMetadataURL(c *gophercloud.ServiceClient, id string) string { + return metadataURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/util.go new file mode 100644 index 000000000000..40fbb827b8ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/snapshots/util.go @@ -0,0 +1,22 @@ +package snapshots + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go new file mode 100644 index 000000000000..307b8b12d2f0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/doc.go @@ -0,0 +1,5 @@ +// Package volumes provides information and interaction with volumes in the +// OpenStack Block Storage service. A volume is a detachable block storage +// device, akin to a USB hard drive. It can only be attached to one instance at +// a time. 
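A hedged end-to-end sketch of the lifecycle the v3 volumes package exposes. The client helpers (openstack.AuthOptionsFromEnv, AuthenticatedClient, NewBlockStorageV3) come from the wider gophercloud tree rather than this diff, and the region, size, and name are placeholders.

package main

import (
	"log"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

func main() {
	// Authenticate from the standard OS_* environment variables.
	ao, err := openstack.AuthOptionsFromEnv()
	if err != nil {
		log.Fatal(err)
	}
	provider, err := openstack.AuthenticatedClient(ao)
	if err != nil {
		log.Fatal(err)
	}
	client, err := openstack.NewBlockStorageV3(provider, gophercloud.EndpointOpts{Region: "RegionOne"})
	if err != nil {
		log.Fatal(err)
	}

	// Create a 75 GB volume, wait for it to become available, then delete it.
	vol, err := volumes.Create(client, volumes.CreateOpts{Size: 75, Name: "vol-001"}).Extract()
	if err != nil {
		log.Fatal(err)
	}
	if err := volumes.WaitForStatus(client, vol.ID, "available", 60); err != nil {
		log.Fatal(err)
	}
	if err := volumes.Delete(client, vol.ID).ExtractErr(); err != nil {
		log.Fatal(err)
	}
}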
+package volumes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go new file mode 100644 index 000000000000..b7977cbc8f11 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/requests.go @@ -0,0 +1,207 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume. This object is passed to +// the volumes.Create function. For more information about these parameters, +// see the Volume object. +type CreateOpts struct { + // The size of the volume, in GB + Size int `json:"size" required:"true"` + // The availability zone + AvailabilityZone string `json:"availability_zone,omitempty"` + // ConsistencyGroupID is the ID of a consistency group + ConsistencyGroupID string `json:"consistencygroup_id,omitempty"` + // The volume description + Description string `json:"description,omitempty"` + // One or more metadata key and value pairs to associate with the volume + Metadata map[string]string `json:"metadata,omitempty"` + // The volume name + Name string `json:"name,omitempty"` + // the ID of the existing volume snapshot + SnapshotID string `json:"snapshot_id,omitempty"` + // SourceReplica is a UUID of an existing volume to replicate with + SourceReplica string `json:"source_replica,omitempty"` + // the ID of the existing volume + SourceVolID string `json:"source_volid,omitempty"` + // The ID of the image from which you want to create the volume. + // Required to create a bootable volume. + ImageID string `json:"imageRef,omitempty"` + // The associated volume type + VolumeType string `json:"volume_type,omitempty"` +} + +// ToVolumeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Create will create a new Volume based on the values in CreateOpts. To extract +// the Volume object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} + +// Delete will delete the existing Volume with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume with the provided ID. To extract the Volume object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeListQuery() (string, error) +} + +// ListOpts holds options for listing Volumes. It is passed to the volumes.List +// function. 
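A short sketch of filtering with ListOpts and collecting the whole listing at once with AllPages, which lets the pager follow volumes_links until the results are exhausted. The status filter is an illustrative value.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

// availableVolumes returns every volume currently in the "available" state.
func availableVolumes(client *gophercloud.ServiceClient) ([]volumes.Volume, error) {
	allPages, err := volumes.List(client, volumes.ListOpts{Status: "available"}).AllPages()
	if err != nil {
		return nil, err
	}
	vols, err := volumes.ExtractVolumes(allPages)
	if err != nil {
		return nil, err
	}
	fmt.Println(len(vols), "available volumes")
	return vols, nil
}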
+type ListOpts struct { + // AllTenants will retrieve volumes of all tenants/projects. + AllTenants bool `q:"all_tenants"` + + // Metadata will filter results based on specified metadata. + Metadata map[string]string `q:"metadata"` + + // Name will filter by the specified volume name. + Name string `q:"name"` + + // Status will filter by the specified status. + Status string `q:"status"` + + // TenantID will filter by a specific tenant/project ID. + // Setting AllTenants is required for this. + TenantID string `q:"project_id"` + + // Comma-separated list of sort keys and optional sort directions in the + // form of [:]. + Sort string `q:"sort"` + + // Requests a page size of items. + Limit int `q:"limit"` + + // Used in conjunction with limit to return a slice of items. + Offset int `q:"offset"` + + // The ID of the last-seen item. + Marker string `q:"marker"` +} + +// ToVolumeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Volumes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToVolumeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VolumePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVolumeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Volume. This object is passed +// to the volumes.Update function. For more information about the parameters, see +// the Volume object. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume") +} + +// Update will update the Volume with provided information. To extract the updated +// Volume from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToVolumeUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convienience function that returns a server's ID given its name. 
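A sketch of resolving a volume name to its ID with the IDFromName helper defined below; it fails with ErrResourceNotFound when nothing matches and ErrMultipleResourcesFound when the name is ambiguous. The name is a placeholder.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

// resolveVolume maps a human-readable volume name to its unique ID.
func resolveVolume(client *gophercloud.ServiceClient, name string) (string, error) {
	id, err := volumes.IDFromName(client, name)
	if err != nil {
		return "", err
	}
	fmt.Printf("volume %q has ID %s\n", name, id)
	return id, nil
}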
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractVolumes(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "volume"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "volume"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go new file mode 100644 index 000000000000..87f71262c1db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/results.go @@ -0,0 +1,170 @@ +package volumes + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Attachment represents a Volume Attachment record +type Attachment struct { + AttachedAt time.Time `json:"-"` + AttachmentID string `json:"attachment_id"` + Device string `json:"device"` + HostName string `json:"host_name"` + ID string `json:"id"` + ServerID string `json:"server_id"` + VolumeID string `json:"volume_id"` +} + +// UnmarshalJSON is our unmarshalling helper +func (r *Attachment) UnmarshalJSON(b []byte) error { + type tmp Attachment + var s struct { + tmp + AttachedAt gophercloud.JSONRFC3339MilliNoZ `json:"attached_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Attachment(s.tmp) + + r.AttachedAt = time.Time(s.AttachedAt) + + return err +} + +// Volume contains all the information associated with an OpenStack Volume. +type Volume struct { + // Unique identifier for the volume. + ID string `json:"id"` + // Current status of the volume. + Status string `json:"status"` + // Size of the volume in GB. + Size int `json:"size"` + // AvailabilityZone is which availability zone the volume is in. + AvailabilityZone string `json:"availability_zone"` + // The date when this volume was created. + CreatedAt time.Time `json:"-"` + // The date when this volume was last updated + UpdatedAt time.Time `json:"-"` + // Instances onto which the volume is attached. + Attachments []Attachment `json:"attachments"` + // Human-readable display name for the volume. + Name string `json:"name"` + // Human-readable description for the volume. + Description string `json:"description"` + // The type of volume to create, either SATA or SSD. + VolumeType string `json:"volume_type"` + // The ID of the snapshot from which the volume was created + SnapshotID string `json:"snapshot_id"` + // The ID of another block storage volume from which the current volume was created + SourceVolID string `json:"source_volid"` + // Arbitrary key-value pairs defined by the user. + Metadata map[string]string `json:"metadata"` + // UserID is the id of the user who created the volume. + UserID string `json:"user_id"` + // Indicates whether this is a bootable volume. + Bootable string `json:"bootable"` + // Encrypted denotes if the volume is encrypted. + Encrypted bool `json:"encrypted"` + // ReplicationStatus is the status of replication. + ReplicationStatus string `json:"replication_status"` + // ConsistencyGroupID is the consistency group ID. 
+ ConsistencyGroupID string `json:"consistencygroup_id"` + // Multiattach denotes if the volume is multi-attach capable. + Multiattach bool `json:"multiattach"` +} + +// UnmarshalJSON another unmarshalling function +func (r *Volume) UnmarshalJSON(b []byte) error { + type tmp Volume + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Volume(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return err +} + +// VolumePage is a pagination.pager that is returned from a call to the List function. +type VolumePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a ListResult contains no Volumes. +func (r VolumePage) IsEmpty() (bool, error) { + volumes, err := ExtractVolumes(r) + return len(volumes) == 0, err +} + +func (page VolumePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"volumes_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractVolumes extracts and returns Volumes. It is used while iterating over a volumes.List call. +func ExtractVolumes(r pagination.Page) ([]Volume, error) { + var s []Volume + err := ExtractVolumesInto(r, &s) + return s, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume object out of the commonResult object. +func (r commonResult) Extract() (*Volume, error) { + var s Volume + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractInto converts our response data into a volume struct +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "volume") +} + +// ExtractVolumesInto similar to ExtractInto but operates on a `list` of volumes +func ExtractVolumesInto(r pagination.Page, v interface{}) error { + return r.(VolumePage).Result.ExtractIntoSlicePtr(v, "volumes") +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. +type UpdateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. 
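ExtractVolumesInto is what allows callers to decode extension attributes alongside the base Volume fields; a sketch mirroring the pattern used in the tests in this diff, embedding volumetenants.VolumeTenantExt so the os-vol-tenant-attr:tenant_id field is captured as well.

package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes"
)

// VolumeWithTenant embeds the base Volume plus the tenant extension so the
// tenant ID is decoded along with the standard fields.
type VolumeWithTenant struct {
	volumes.Volume
	volumetenants.VolumeTenantExt
}

// listWithTenantIDs decodes the full listing into the extended struct using
// ExtractVolumesInto instead of ExtractVolumes.
func listWithTenantIDs(client *gophercloud.ServiceClient) ([]VolumeWithTenant, error) {
	allPages, err := volumes.List(client, volumes.ListOpts{}).AllPages()
	if err != nil {
		return nil, err
	}
	var all []VolumeWithTenant
	if err := volumes.ExtractVolumesInto(allPages, &all); err != nil {
		return nil, err
	}
	return all, nil
}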
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/doc.go new file mode 100644 index 000000000000..a2b24b7c1933 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/doc.go @@ -0,0 +1,2 @@ +// volumes_v3 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/fixtures.go new file mode 100644 index 000000000000..74efb1a2564a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/fixtures.go @@ -0,0 +1,217 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "volumes": [ + { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:35:03.000000", + "bootable": "false", + "name": "vol-001", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "os-vol-host-attr:host": null, + "availability_zone": "nova", + "attachments": [{ + "server_id": "83ec2e3b-4321-422b-8706-a84185f52a0a", + "attachment_id": "05551600-a936-4d4a-ba42-79a037c1-c91a", + "attached_at": "2016-08-06T14:48:20.000000", + "host_name": "foobar", + "volume_id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + "device": "/dev/vdc", + "id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75" + }], + "id": "289da7f8-6440-407c-9fb4-7db01ec49164", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {"foo": "bar"}, + "status": "available", + "description": null + }, + { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:32:29.000000", + "bootable": "false", + "name": "vol-002", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "os-vol-host-attr:host": null, + "availability_zone": "nova", + "attachments": [], + "id": "96c3bda7-c82a-4f50-be73-ca7621794835", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {}, + "status": "available", + "description": null + } + ], + "volumes_links": [ + { + "href": "%s/volumes/detail?marker=1", + "rel": "next" + }] +} + `, th.Server.URL) + case "1": + fmt.Fprintf(w, `{"volumes": []}`) + 
default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume": { + "volume_type": "lvmdriver-1", + "created_at": "2015-09-17T03:32:29.000000", + "bootable": "false", + "name": "vol-001", + "os-vol-mig-status-attr:name_id": null, + "consistencygroup_id": null, + "source_volid": null, + "os-volume-replication:driver_data": null, + "multiattach": false, + "snapshot_id": null, + "replication_status": "disabled", + "os-volume-replication:extended_status": null, + "encrypted": false, + "os-vol-host-attr:host": null, + "availability_zone": "nova", + "attachments": [{ + "server_id": "83ec2e3b-4321-422b-8706-a84185f52a0a", + "attachment_id": "05551600-a936-4d4a-ba42-79a037c1-c91a", + "attached_at": "2016-08-06T14:48:20.000000", + "host_name": "foobar", + "volume_id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + "device": "/dev/vdc", + "id": "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75" + }], + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "size": 75, + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "os-vol-tenant-attr:tenant_id": "304dc00909ac4d0da6c62d816bcb3459", + "os-vol-mig-status-attr:migstat": null, + "metadata": {}, + "status": "available", + "description": null + } +} + `) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume": { + "name": "vol-001", + "size": 75 + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "volume": { + "size": 75, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "metadata": {}, + "created_at": "2015-09-17T03:32:29.044216", + "encrypted": false, + "bootable": "false", + "availability_zone": "nova", + "attachments": [], + "user_id": "ff1ce52c03ab433aaba9108c2e3ef541", + "status": "creating", + "description": null, + "volume_type": "lvmdriver-1", + "name": "vol-001", + "replication_status": "disabled", + "consistencygroup_id": null, + "source_volid": null, + "snapshot_id": null, + "multiattach": false + } +} + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockUpdateResponse(t *testing.T) { + th.Mux.HandleFunc("/volumes/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume": { + "name": "vol-002" + } +} + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/requests_test.go new file mode 100644 index 000000000000..5a1e46b653af --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/testing/requests_test.go @@ -0,0 +1,257 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/extensions/volumetenants" + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + count := 0 + + volumes.List(client.ServiceClient(), &volumes.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := volumes.ExtractVolumes(page) + if err != nil { + t.Errorf("Failed to extract volumes: %v", err) + return false, err + } + + expected := []volumes.Volume{ + { + ID: "289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + Attachments: []volumes.Attachment{{ + ServerID: "83ec2e3b-4321-422b-8706-a84185f52a0a", + AttachmentID: "05551600-a936-4d4a-ba42-79a037c1-c91a", + AttachedAt: time.Date(2016, 8, 6, 14, 48, 20, 0, time.UTC), + HostName: "foobar", + VolumeID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + Device: "/dev/vdc", + ID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + }}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 35, 3, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{"foo": "bar"}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + Attachments: []volumes.Attachment{}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 32, 29, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestListAllWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + type VolumeWithExt struct { + volumes.Volume + volumetenants.VolumeTenantExt + } + + allPages, err := volumes.List(client.ServiceClient(), &volumes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + var actual []VolumeWithExt + err = volumes.ExtractVolumesInto(allPages, &actual) + th.AssertNoErr(t, err) + th.AssertEquals(t, 2, len(actual)) + th.AssertEquals(t, "304dc00909ac4d0da6c62d816bcb3459", actual[0].TenantID) +} + +func TestListAll(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := volumes.List(client.ServiceClient(), &volumes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := volumes.ExtractVolumes(allPages) + th.AssertNoErr(t, err) + + expected := []volumes.Volume{ + { + ID: 
"289da7f8-6440-407c-9fb4-7db01ec49164", + Name: "vol-001", + Attachments: []volumes.Attachment{{ + ServerID: "83ec2e3b-4321-422b-8706-a84185f52a0a", + AttachmentID: "05551600-a936-4d4a-ba42-79a037c1-c91a", + AttachedAt: time.Date(2016, 8, 6, 14, 48, 20, 0, time.UTC), + HostName: "foobar", + VolumeID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + Device: "/dev/vdc", + ID: "d6cacb1a-8b59-4c88-ad90-d70ebb82bb75", + }}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 35, 3, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{"foo": "bar"}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + { + ID: "96c3bda7-c82a-4f50-be73-ca7621794835", + Name: "vol-002", + Attachments: []volumes.Attachment{}, + AvailabilityZone: "nova", + Bootable: "false", + ConsistencyGroupID: "", + CreatedAt: time.Date(2015, 9, 17, 3, 32, 29, 0, time.UTC), + Description: "", + Encrypted: false, + Metadata: map[string]string{}, + Multiattach: false, + //TenantID: "304dc00909ac4d0da6c62d816bcb3459", + //ReplicationDriverData: "", + //ReplicationExtendedStatus: "", + ReplicationStatus: "disabled", + Size: 75, + SnapshotID: "", + SourceVolID: "", + Status: "available", + UserID: "ff1ce52c03ab433aaba9108c2e3ef541", + VolumeType: "lvmdriver-1", + }, + } + + th.CheckDeepEquals(t, expected, actual) + +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "vol-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := &volumes.CreateOpts{Size: 75, Name: "vol-001"} + n, err := volumes.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Size, 75) + th.AssertEquals(t, n.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := volumes.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateResponse(t) + + options := volumes.UpdateOpts{Name: "vol-002"} + v, err := volumes.Update(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract() + th.AssertNoErr(t, err) + th.CheckEquals(t, "vol-002", v.Name) +} + +func TestGetWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + var s struct { + volumes.Volume + volumetenants.VolumeTenantExt + } + err := volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(&s) + th.AssertNoErr(t, err) + th.AssertEquals(t, "304dc00909ac4d0da6c62d816bcb3459", s.TenantID) + + err = volumes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(s) + if err == nil { + t.Errorf("Expected error when providing non-pointer struct") + } +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go new file mode 100644 index 000000000000..170724905abe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/urls.go @@ -0,0 +1,23 @@ +package volumes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes") +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("volumes", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("volumes", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go new file mode 100644 index 000000000000..e86c1b4b4ee5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumes/util.go @@ -0,0 +1,22 @@ +package volumes + +import ( + "github.com/gophercloud/gophercloud" +) + +// WaitForStatus will continually poll the resource, checking for a particular +// status. It will do this for the amount of seconds defined. +func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/doc.go new file mode 100644 index 000000000000..8b2769cb7cda --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/doc.go @@ -0,0 +1,63 @@ +/* +Package volumetypes provides information and interaction with volume types in the +OpenStack Block Storage service. A volume type is a collection of specs used to +define the volume capabilities. 
+ +Example to list Volume Types + + allPages, err := volumetypes.List(client, volumetypes.ListOpts{}).AllPages() + if err != nil{ + panic(err) + } + volumeTypes, err := volumetypes.ExtractVolumeTypes(allPages) + if err != nil{ + panic(err) + } + for _,vt := range volumeTypes{ + fmt.Println(vt) + } + +Example to show a Volume Type + + typeID := "7ffaca22-f646-41d4-b79d-d7e4452ef8cc" + volumeType, err := volumetypes.Get(client, typeID).Extract() + if err != nil{ + panic(err) + } + fmt.Println(volumeType) + +Example to create a Volume Type + + volumeType, err := volumetypes.Create(client, volumetypes.CreateOpts{ + Name:"volume_type_001", + IsPublic:true, + Description:"description_001", + }).Extract() + if err != nil{ + panic(err) + } + fmt.Println(volumeType) + +Example to delete a Volume Type + + typeID := "7ffaca22-f646-41d4-b79d-d7e4452ef8cc" + err := volumetypes.Delete(client, typeID).ExtractErr() + if err != nil{ + panic(err) + } + +Example to update a Volume Type + + typeID := "7ffaca22-f646-41d4-b79d-d7e4452ef8cc" + volumetype, err = volumetypes.Update(client, typeID, volumetypes.UpdateOpts{ + Name: "volume_type_002", + Description:"description_002", + IsPublic:false, + }).Extract() + if err != nil{ + panic(err) + } + fmt.Println(volumetype) +*/ + +package volumetypes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/requests.go new file mode 100644 index 000000000000..82a79bcf6a06 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/requests.go @@ -0,0 +1,138 @@ +package volumetypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToVolumeTypeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a Volume Type. This object is passed to +// the volumetypes.Create function. For more information about these parameters, +// see the Volume Type object. +type CreateOpts struct { + // The name of the volume type + Name string `json:"name" required:"true"` + // The volume type description + Description string `json:"description,omitempty"` + // the ID of the existing volume snapshot + IsPublic *bool `json:"os-volume-type-access:is_public,omitempty"` + // Extra spec key-value pairs defined by the user. + ExtraSpecs map[string]string `json:"extra_specs"` +} + +// ToVolumeTypeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToVolumeTypeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume_type") +} + +// Create will create a new Volume Type based on the values in CreateOpts. To extract +// the Volume Type object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeTypeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will delete the existing Volume Type with the provided ID. 
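Because CreateOpts.IsPublic is declared as a *bool above, callers pass a pointer rather than a bare boolean literal; a hedged sketch of creating a type that way and then deleting it again. The type name, description, and extra spec are placeholders.

package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes"
)

// createAndDeleteType creates a public volume type (note the pointer for
// IsPublic) and then removes it with Delete/ExtractErr.
func createAndDeleteType(client *gophercloud.ServiceClient) error {
	isPublic := true
	vt, err := volumetypes.Create(client, volumetypes.CreateOpts{
		Name:        "volume_type_001",
		Description: "description_001",
		IsPublic:    &isPublic,
		ExtraSpecs:  map[string]string{"volume_backend_name": "lvmdriver-1"},
	}).Extract()
	if err != nil {
		return err
	}
	return volumetypes.Delete(client, vt.ID).ExtractErr()
}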
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get retrieves the Volume Type with the provided ID. To extract the Volume Type object +// from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToVolumeTypeListQuery() (string, error) +} + +// ListOpts holds options for listing Volume Types. It is passed to the volumetypes.List +// function. +type ListOpts struct { + // Comma-separated list of sort keys and optional sort directions in the + // form of [:]. + Sort string `q:"sort"` + // Requests a page size of items. + Limit int `q:"limit"` + // Used in conjunction with limit to return a slice of items. + Offset int `q:"offset"` + // The ID of the last-seen item. + Marker string `q:"marker"` +} + +// ToVolumeTypeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToVolumeTypeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns Volume types. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + + if opts != nil { + query, err := opts.ToVolumeTypeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return VolumeTypePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToVolumeTypeUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing Volume Type. This object is passed +// to the volumetypes.Update function. For more information about the parameters, see +// the Volume Type object. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + IsPublic *bool `json:"is_public,omitempty"` +} + +// ToVolumeUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToVolumeTypeUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volume_type") +} + +// Update will update the Volume Type with provided information. To extract the updated +// Volume Type from the response, call the Extract method on the UpdateResult. 
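A sketch of the Update flow defined below; as with CreateOpts, visibility is toggled through a *bool. The type ID and new name are placeholders.

package example

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes"
)

// renameType updates the name and visibility of an existing volume type and
// extracts the updated object from the UpdateResult.
func renameType(client *gophercloud.ServiceClient, typeID string) (*volumetypes.VolumeType, error) {
	private := false
	opts := volumetypes.UpdateOpts{
		Name:     "volume_type_002",
		IsPublic: &private,
	}
	return volumetypes.Update(client, typeID, opts).Extract()
}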
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToVolumeTypeUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/results.go new file mode 100644 index 000000000000..2e3169070ffa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/results.go @@ -0,0 +1,94 @@ +package volumetypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Volume Type contains all the information associated with an OpenStack Volume Type. +type VolumeType struct { + // Unique identifier for the volume type. + ID string `json:"id"` + // Human-readable display name for the volume type. + Name string `json:"name"` + // Human-readable description for the volume type. + Description string `json:"description"` + // Arbitrary key-value pairs defined by the user. + ExtraSpecs map[string]string `json:"extra_specs"` + // Whether the volume type is publicly visible. + IsPublic bool `json:"is_public"` + // Qos Spec ID + QosSpecID string `json:"qos_specs_id"` + // Volume Type access public attribute + PublicAccess bool `json:"os-volume-type-access:is_public"` +} + +// VolumeTypePage is a pagination.pager that is returned from a call to the List function. +type VolumeTypePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a ListResult contains no Volume Types. +func (r VolumeTypePage) IsEmpty() (bool, error) { + volumetypes, err := ExtractVolumeTypes(r) + return len(volumetypes) == 0, err +} + +func (page VolumeTypePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"volume_type_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractVolumeTypes extracts and returns Volumes. It is used while iterating over a volumetypes.List call. +func ExtractVolumeTypes(r pagination.Page) ([]VolumeType, error) { + var s []VolumeType + err := ExtractVolumeTypesInto(r, &s) + return s, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Volume Type object out of the commonResult object. +func (r commonResult) Extract() (*VolumeType, error) { + var s VolumeType + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractInto converts our response data into a volume type struct +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "volume_type") +} + +// ExtractVolumesInto similar to ExtractInto but operates on a `list` of volume types +func ExtractVolumeTypesInto(r pagination.Page, v interface{}) error { + return r.(VolumeTypePage).Result.ExtractIntoSlicePtr(v, "volume_types") +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult contains the response body and error from an Update request. 
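To show what the result wrappers above decode in practice, a small sketch that fetches a single type and reads the extra specs, QoS spec ID, and visibility fields. The type ID and the "volume_backend_name" spec key are placeholders.

package example

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes"
)

// describeType fetches one volume type and prints the fields populated by
// the GetResult/commonResult extraction above.
func describeType(client *gophercloud.ServiceClient, typeID string) error {
	vt, err := volumetypes.Get(client, typeID).Extract()
	if err != nil {
		return err
	}
	fmt.Printf("%s: backend=%q qos=%q public=%v\n",
		vt.Name, vt.ExtraSpecs["volume_backend_name"], vt.QosSpecID, vt.PublicAccess)
	return nil
}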
+type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/doc.go new file mode 100644 index 000000000000..3fd720a6740c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/doc.go @@ -0,0 +1,2 @@ +// volume_types +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/fixtures.go new file mode 100644 index 000000000000..979aee174c1d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/fixtures.go @@ -0,0 +1,154 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` +{ + "volume_types": [ + { + "name": "SSD", + "qos_specs_id": null, + "os-volume-type-access:is_public": true, + "extra_specs": { + "volume_backend_name": "lvmdriver-1" + }, + "is_public": true, + "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", + "description": null + }, + { + "name": "SATA", + "qos_specs_id": null, + "os-volume-type-access:is_public": true, + "extra_specs": { + "volume_backend_name": "lvmdriver-1" + }, + "is_public": true, + "id": "8eb69a46-df97-4e41-9586-9a40a7533803", + "description": null + } + ], + "volume_type_links": [ + { + "href": "%s/types?marker=1", + "rel": "next" + } + ] +} + `, th.Server.URL) + case "1": + fmt.Fprintf(w, `{"volume_types": []}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume_type": { + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "name": "vol-type-001", + "os-volume-type-access:is_public": true, + "qos_specs_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "description": "volume type 001", + "is_public": true, + "extra_specs": { + "capabilities": "gpu" + } + } +} +`) + }) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "volume_type": { + "name": "test_type", + "os-volume-type-access:is_public": true, + "description": "test_type_desc", + "extra_specs": { + "capabilities": "gpu" + } + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "volume_type": { + "name": "test_type", + "extra_specs": {}, + 
"is_public": true, + "os-volume-type-access:is_public": true, + "id": "6d0ff92a-0007-4780-9ece-acfe5876966a", + "description": "test_type_desc", + "extra_specs": { + "capabilities": "gpu" + } + } +} + `) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockUpdateResponse(t *testing.T) { + th.Mux.HandleFunc("/types/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` +{ + "volume_type": { + "name": "vol-type-002", + "description": "volume type 0001", + "is_public": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22" + } +}`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/requests_test.go new file mode 100644 index 000000000000..00c3e6ea9e6d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/testing/requests_test.go @@ -0,0 +1,118 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListAll(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + pages := 0 + err := volumetypes.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + pages++ + actual, err := volumetypes.ExtractVolumeTypes(page) + if err != nil { + return false, err + } + expected := []volumetypes.VolumeType{ + { + ID: "6685584b-1eac-4da6-b5c3-555430cf68ff", + Name: "SSD", + ExtraSpecs: map[string]string{"volume_backend_name": "lvmdriver-1"}, + IsPublic: true, + Description: "", + QosSpecID: "", + PublicAccess: true, + }, { + ID: "8eb69a46-df97-4e41-9586-9a40a7533803", + Name: "SATA", + ExtraSpecs: map[string]string{"volume_backend_name": "lvmdriver-1"}, + IsPublic: true, + Description: "", + QosSpecID: "", + PublicAccess: true, + }, + } + th.CheckDeepEquals(t, expected, actual) + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, pages, 1) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + v, err := volumetypes.Get(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, v.Name, "vol-type-001") + th.AssertEquals(t, v.ID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, v.ExtraSpecs["capabilities"], "gpu") + th.AssertEquals(t, v.QosSpecID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, v.PublicAccess, true) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + var isPublic = true + + options := &volumetypes.CreateOpts{ + Name: "test_type", + IsPublic: &isPublic, + Description: "test_type_desc", + ExtraSpecs: map[string]string{"capabilities": "gpu"}, + } + + n, err := volumetypes.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, 
"test_type") + th.AssertEquals(t, n.Description, "test_type_desc") + th.AssertEquals(t, n.IsPublic, true) + th.AssertEquals(t, n.PublicAccess, true) + th.AssertEquals(t, n.ID, "6d0ff92a-0007-4780-9ece-acfe5876966a") + th.AssertEquals(t, n.ExtraSpecs["capabilities"], "gpu") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := volumetypes.Delete(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateResponse(t) + + var isPublic = true + options := volumetypes.UpdateOpts{ + Name: "vol-type-002", + IsPublic: &isPublic, + } + + v, err := volumetypes.Update(client.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22", options).Extract() + th.AssertNoErr(t, err) + th.CheckEquals(t, "vol-type-002", v.Name) + th.CheckEquals(t, true, v.IsPublic) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/urls.go new file mode 100644 index 000000000000..f794af86d1f4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/blockstorage/v3/volumetypes/urls.go @@ -0,0 +1,23 @@ +package volumetypes + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("types") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("types") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/doc.go new file mode 100644 index 000000000000..f78d4f73551a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/doc.go @@ -0,0 +1,4 @@ +// Package base provides information and interaction with the base API +// resource in the OpenStack CDN service. This API resource allows for +// retrieving the Home Document and pinging the root URL. +package base diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/requests.go new file mode 100644 index 000000000000..34d3b724fb68 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/requests.go @@ -0,0 +1,19 @@ +package base + +import "github.com/gophercloud/gophercloud" + +// Get retrieves the home document, allowing the user to discover the +// entire API. +func Get(c *gophercloud.ServiceClient) (r GetResult) { + _, r.Err = c.Get(getURL(c), &r.Body, nil) + return +} + +// Ping retrieves a ping to the server. 
+func Ping(c *gophercloud.ServiceClient) (r PingResult) { + _, r.Err = c.Get(pingURL(c), nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + MoreHeaders: map[string]string{"Accept": ""}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/results.go new file mode 100644 index 000000000000..2dfde7dca3cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/results.go @@ -0,0 +1,23 @@ +package base + +import "github.com/gophercloud/gophercloud" + +// HomeDocument is a resource that contains all the resources for the CDN API. +type HomeDocument map[string]interface{} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a home document resource. +func (r GetResult) Extract() (*HomeDocument, error) { + var s HomeDocument + err := r.ExtractInto(&s) + return &s, err +} + +// PingResult represents the result of a Ping operation. +type PingResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/doc.go new file mode 100644 index 000000000000..891c69a2151f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/doc.go @@ -0,0 +1,2 @@ +// cdn_base_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/fixtures.go new file mode 100644 index 000000000000..f1f4ac00479d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/fixtures.go @@ -0,0 +1,53 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleGetSuccessfully creates an HTTP handler at `/` on the test handler mux +// that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "resources": { + "rel/cdn": { + "href-template": "services{?marker,limit}", + "href-vars": { + "marker": "param/marker", + "limit": "param/limit" + }, + "hints": { + "allow": [ + "GET" + ], + "formats": { + "application/json": {} + } + } + } + } + } + `) + + }) +} + +// HandlePingSuccessfully creates an HTTP handler at `/ping` on the test handler +// mux that responds with a `Ping` response. 
+func HandlePingSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/ping", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/requests_test.go new file mode 100644 index 000000000000..9c9517e3a0b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/testing/requests_test.go @@ -0,0 +1,46 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/cdn/v1/base" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetHomeDocument(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := base.Get(fake.ServiceClient()).Extract() + th.CheckNoErr(t, err) + + expected := base.HomeDocument{ + "resources": map[string]interface{}{ + "rel/cdn": map[string]interface{}{ + "href-template": "services{?marker,limit}", + "href-vars": map[string]interface{}{ + "marker": "param/marker", + "limit": "param/limit", + }, + "hints": map[string]interface{}{ + "allow": []interface{}{"GET"}, + "formats": map[string]interface{}{ + "application/json": map[string]interface{}{}, + }, + }, + }, + }, + } + th.CheckDeepEquals(t, expected, *actual) +} + +func TestPing(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePingSuccessfully(t) + + err := base.Ping(fake.ServiceClient()).ExtractErr() + th.CheckNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/urls.go new file mode 100644 index 000000000000..07d892ba9384 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/base/urls.go @@ -0,0 +1,11 @@ +package base + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL() +} + +func pingURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("ping") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/doc.go new file mode 100644 index 000000000000..d4066985cb25 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/doc.go @@ -0,0 +1,6 @@ +// Package flavors provides information and interaction with the flavors API +// resource in the OpenStack CDN service. This API resource allows for +// listing flavors and retrieving a specific flavor. +// +// A flavor is a mapping configuration to a CDN provider. +package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/requests.go new file mode 100644 index 000000000000..1977fe365a66 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/requests.go @@ -0,0 +1,19 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a single page of CDN flavors. 
+func List(c *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.SinglePageBase(r)} + }) +} + +// Get retrieves a specific flavor based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/results.go new file mode 100644 index 000000000000..02c285134b89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/results.go @@ -0,0 +1,60 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Provider represents a provider for a particular flavor. +type Provider struct { + // Specifies the name of the provider. The name must not exceed 64 bytes in + // length and is limited to unicode, digits, underscores, and hyphens. + Provider string `json:"provider"` + // Specifies a list with an href where rel is provider_url. + Links []gophercloud.Link `json:"links"` +} + +// Flavor represents a mapping configuration to a CDN provider. +type Flavor struct { + // Specifies the name of the flavor. The name must not exceed 64 bytes in + // length and is limited to unicode, digits, underscores, and hyphens. + ID string `json:"id"` + // Specifies the list of providers mapped to this flavor. + Providers []Provider `json:"providers"` + // Specifies the self-navigating JSON document paths. + Links []gophercloud.Link `json:"links"` +} + +// FlavorPage is the page returned by a pager when traversing over a +// collection of CDN flavors. +type FlavorPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a FlavorPage contains no Flavors. +func (r FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(r) + return len(flavors) == 0, err +} + +// ExtractFlavors extracts and returns Flavors. It is used while iterating over +// a flavors.List call. +func ExtractFlavors(r pagination.Page) ([]Flavor, error) { + var s struct { + Flavors []Flavor `json:"flavors"` + } + err := (r.(FlavorPage)).ExtractInto(&s) + return s.Flavors, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a flavor from a GetResult. 
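+// An illustrative usage sketch, not a definitive example: it assumes an
+// already-authenticated CDN v1 *gophercloud.ServiceClient, called client here,
+// and uses the "asia" flavor ID that appears in the package fixtures.
+//
+//	flavor, err := flavors.Get(client, "asia").Extract()
+//	if err == nil {
+//		for _, p := range flavor.Providers {
+//			fmt.Println(p.Provider)
+//		}
+//	}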
+func (r GetResult) Extract() (*Flavor, error) { + var s *Flavor + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/doc.go new file mode 100644 index 000000000000..567b67e23768 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/doc.go @@ -0,0 +1,2 @@ +// cdn_flavors_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/fixtures.go new file mode 100644 index 000000000000..ed97247e2e5e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/fixtures.go @@ -0,0 +1,82 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleListCDNFlavorsSuccessfully creates an HTTP handler at `/flavors` on the test handler mux +// that responds with a `List` response. +func HandleListCDNFlavorsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "flavors": [ + { + "id": "europe", + "providers": [ + { + "provider": "Fastly", + "links": [ + { + "href": "http://www.fastly.com", + "rel": "provider_url" + } + ] + } + ], + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/flavors/europe", + "rel": "self" + } + ] + } + ] + } + `) + }) +} + +// HandleGetCDNFlavorSuccessfully creates an HTTP handler at `/flavors/{id}` on the test handler mux +// that responds with a `Get` response. 
+func HandleGetCDNFlavorSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/asia", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "id" : "asia", + "providers" : [ + { + "provider" : "ChinaCache", + "links": [ + { + "href": "http://www.chinacache.com", + "rel": "provider_url" + } + ] + } + ], + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/flavors/asia", + "rel": "self" + } + ] + } + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/requests_test.go new file mode 100644 index 000000000000..bc4b1a50e568 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/testing/requests_test.go @@ -0,0 +1,90 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListCDNFlavorsSuccessfully(t) + + count := 0 + + err := flavors.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := flavors.ExtractFlavors(page) + if err != nil { + t.Errorf("Failed to extract flavors: %v", err) + return false, err + } + + expected := []flavors.Flavor{ + { + ID: "europe", + Providers: []flavors.Provider{ + { + Provider: "Fastly", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.fastly.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "self", + }, + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetCDNFlavorSuccessfully(t) + + expected := &flavors.Flavor{ + ID: "asia", + Providers: []flavors.Provider{ + { + Provider: "ChinaCache", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://www.chinacache.com", + Rel: "provider_url", + }, + }, + }, + }, + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "self", + }, + }, + } + + actual, err := flavors.Get(fake.ServiceClient(), "asia").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/urls.go new file mode 100644 index 000000000000..a8540a2aed8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/flavors/urls.go @@ -0,0 +1,11 @@ +package flavors + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("flavors") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("flavors", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/doc.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/doc.go new file mode 100644 index 000000000000..ceecaa5a5e44 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/doc.go @@ -0,0 +1,7 @@ +// Package serviceassets provides information and interaction with the +// serviceassets API resource in the OpenStack CDN service. This API resource +// allows for deleting cached assets. +// +// A service distributes assets across the network. Service assets let you +// interrogate properties about these assets and perform certain actions on them. +package serviceassets diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/requests.go new file mode 100644 index 000000000000..80c908fd9d92 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/requests.go @@ -0,0 +1,51 @@ +package serviceassets + +import ( + "strings" + + "github.com/gophercloud/gophercloud" +) + +// DeleteOptsBuilder allows extensions to add additional parameters to the Delete +// request. +type DeleteOptsBuilder interface { + ToCDNAssetDeleteParams() (string, error) +} + +// DeleteOpts is a structure that holds options for deleting CDN service assets. +type DeleteOpts struct { + // If all is set to true, specifies that the delete occurs against all of the + // assets for the service. + All bool `q:"all"` + // Specifies the relative URL of the asset to be deleted. + URL string `q:"url"` +} + +// ToCDNAssetDeleteParams formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToCDNAssetDeleteParams() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete accepts a unique service ID or URL and deletes the CDN service asset associated with +// it. For example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. +func Delete(c *gophercloud.ServiceClient, idOrURL string, opts DeleteOptsBuilder) (r DeleteResult) { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = deleteURL(c, idOrURL) + } + if opts != nil { + q, err := opts.ToCDNAssetDeleteParams() + if err != nil { + r.Err = err + return + } + url += q + } + _, r.Err = c.Delete(url, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/results.go new file mode 100644 index 000000000000..b6114c689396 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/results.go @@ -0,0 +1,8 @@ +package serviceassets + +import "github.com/gophercloud/gophercloud" + +// DeleteResult represents the result of a Delete operation. 
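+// An illustrative usage sketch, not a definitive example: it assumes an
+// already-authenticated CDN v1 *gophercloud.ServiceClient, called client here,
+// and a placeholder service ID taken from the package fixtures.
+//
+//	opts := serviceassets.DeleteOpts{All: true}
+//	err := serviceassets.Delete(client, "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", opts).ExtractErr()
+//	if err != nil {
+//		// handle the error
+//	}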
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/doc.go new file mode 100644 index 000000000000..1adb681a281f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/doc.go @@ -0,0 +1,2 @@ +// cdn_serviceassets_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/fixtures.go new file mode 100644 index 000000000000..3172d30fd124 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/fixtures.go @@ -0,0 +1,19 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleDeleteCDNAssetSuccessfully creates an HTTP handler at `/services/{id}/assets` on the test handler mux +// that responds with a `Delete` response. +func HandleDeleteCDNAssetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0/assets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/requests_test.go new file mode 100644 index 000000000000..ff2073bd861c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/testing/requests_test.go @@ -0,0 +1,19 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteCDNAssetSuccessfully(t) + + err := serviceassets.Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", nil).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/urls.go new file mode 100644 index 000000000000..ce1741826aeb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/serviceassets/urls.go @@ -0,0 +1,7 @@ +package serviceassets + +import "github.com/gophercloud/gophercloud" + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("services", id, "assets") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/doc.go new file mode 100644 index 000000000000..41f7c60dae21 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/doc.go @@ -0,0 +1,7 @@ +// Package services provides information and interaction with the services API +// resource in the OpenStack CDN service. This API resource allows for +// listing, creating, updating, retrieving, and deleting services. 
+// +// A service represents an application that has its content cached to the edge +// nodes. +package services diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/errors.go new file mode 100644 index 000000000000..359584c2a666 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/errors.go @@ -0,0 +1,7 @@ +package services + +import "fmt" + +func no(str string) error { + return fmt.Errorf("Required parameter %s not provided", str) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/requests.go new file mode 100644 index 000000000000..4c0c6266066f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/requests.go @@ -0,0 +1,285 @@ +package services + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToCDNServiceListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Marker and Limit are used for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` +} + +// ToCDNServiceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToCDNServiceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// CDN services. It accepts a ListOpts struct, which allows for pagination via +// marker and limit. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToCDNServiceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ServicePage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToCDNServiceCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Specifies the name of the service. The minimum length for name is + // 3. The maximum length is 256. + Name string `json:"name" required:"true"` + // Specifies a list of domains used by users to access their website. + Domains []Domain `json:"domains" required:"true"` + // Specifies a list of origin domains or IP addresses where the + // original assets are stored. + Origins []Origin `json:"origins" required:"true"` + // Specifies the CDN provider flavor ID to use. For a list of + // flavors, see the operation to list the available flavors. The minimum + // length for flavor_id is 1. The maximum length is 256. + FlavorID string `json:"flavor_id" required:"true"` + // Specifies the TTL rules for the assets under this service. 
Supports wildcards for fine-grained control. + Caching []CacheRule `json:"caching,omitempty"` + // Specifies the restrictions that define who can access assets (content from the CDN cache). + Restrictions []Restriction `json:"restrictions,omitempty"` +} + +// ToCDNServiceCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToCDNServiceCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create accepts a CreateOpts struct and creates a new CDN service using the +// values provided. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToCDNServiceCreateMap() + if err != nil { + r.Err = err + return r + } + resp, err := c.Post(createURL(c), &b, nil, nil) + r.Header = resp.Header + r.Err = err + return +} + +// Get retrieves a specific service based on its URL or its unique ID. For +// example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. +func Get(c *gophercloud.ServiceClient, idOrURL string) (r GetResult) { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = getURL(c, idOrURL) + } + _, r.Err = c.Get(url, &r.Body, nil) + return +} + +// Path is a JSON pointer location that indicates which service parameter is being added, replaced, +// or removed. +type Path struct { + baseElement string +} + +func (p Path) renderRoot() string { + return "/" + p.baseElement +} + +func (p Path) renderDash() string { + return fmt.Sprintf("/%s/-", p.baseElement) +} + +func (p Path) renderIndex(index int64) string { + return fmt.Sprintf("/%s/%d", p.baseElement, index) +} + +var ( + // PathDomains indicates that an update operation is to be performed on a Domain. + PathDomains = Path{baseElement: "domains"} + + // PathOrigins indicates that an update operation is to be performed on an Origin. + PathOrigins = Path{baseElement: "origins"} + + // PathCaching indicates that an update operation is to be performed on a CacheRule. + PathCaching = Path{baseElement: "caching"} +) + +type value interface { + toPatchValue() interface{} + appropriatePath() Path + renderRootOr(func(p Path) string) string +} + +// Patch represents a single update to an existing Service. Multiple updates to a service can be +// submitted at the same time. +type Patch interface { + ToCDNServiceUpdateMap() map[string]interface{} +} + +// Insertion is a Patch that requests the addition of a value (Domain, Origin, or CacheRule) to +// a Service at a fixed index. Use an Append instead to append the new value to the end of its +// collection. Pass it to the Update function as part of the Patch slice. +type Insertion struct { + Index int64 + Value value +} + +// ToCDNServiceUpdateMap converts an Insertion into a request body fragment suitable for the +// Update call. +func (opts Insertion) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "add", + "path": opts.Value.renderRootOr(func(p Path) string { return p.renderIndex(opts.Index) }), + "value": opts.Value.toPatchValue(), + } +} + +// Append is a Patch that requests the addition of a value (Domain, Origin, or CacheRule) to a +// Service at the end of its respective collection. Use an Insertion instead to insert the value +// at a fixed index within the collection. Pass this to the Update function as part of its +// Patch slice. 
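+// An illustrative usage sketch of submitting a Patch slice, not a definitive
+// example: it assumes an already-authenticated CDN v1
+// *gophercloud.ServiceClient, called client here, and placeholder domain and
+// service values.
+//
+//	patches := services.UpdateOpts{
+//		services.Append{Value: services.Domain{Domain: "added.mywebsite.com"}},
+//		services.Removal{Path: services.PathCaching, Index: 2},
+//		services.NameReplacement{NewName: "mywebsite-renamed"},
+//	}
+//	location, err := services.Update(client, "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", patches).Extract()
+//	if err == nil {
+//		fmt.Println("updated service:", location)
+//	}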
+type Append struct { + Value value +} + +// ToCDNServiceUpdateMap converts an Append into a request body fragment suitable for the +// Update call. +func (a Append) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "add", + "path": a.Value.renderRootOr(func(p Path) string { return p.renderDash() }), + "value": a.Value.toPatchValue(), + } +} + +// Replacement is a Patch that alters a specific service parameter (Domain, Origin, or CacheRule) +// in-place by index. Pass it to the Update function as part of the Patch slice. +type Replacement struct { + Value value + Index int64 +} + +// ToCDNServiceUpdateMap converts a Replacement into a request body fragment suitable for the +// Update call. +func (r Replacement) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "replace", + "path": r.Value.renderRootOr(func(p Path) string { return p.renderIndex(r.Index) }), + "value": r.Value.toPatchValue(), + } +} + +// NameReplacement specifically updates the Service name. Pass it to the Update function as part +// of the Patch slice. +type NameReplacement struct { + NewName string +} + +// ToCDNServiceUpdateMap converts a NameReplacement into a request body fragment suitable for the +// Update call. +func (r NameReplacement) ToCDNServiceUpdateMap() map[string]interface{} { + return map[string]interface{}{ + "op": "replace", + "path": "/name", + "value": r.NewName, + } +} + +// Removal is a Patch that requests the removal of a service parameter (Domain, Origin, or +// CacheRule) by index. Pass it to the Update function as part of the Patch slice. +type Removal struct { + Path Path + Index int64 + All bool +} + +// ToCDNServiceUpdateMap converts a Removal into a request body fragment suitable for the +// Update call. +func (opts Removal) ToCDNServiceUpdateMap() map[string]interface{} { + b := map[string]interface{}{"op": "remove"} + if opts.All { + b["path"] = opts.Path.renderRoot() + } else { + b["path"] = opts.Path.renderIndex(opts.Index) + } + return b +} + +// UpdateOpts is a slice of Patches used to update a CDN service +type UpdateOpts []Patch + +// Update accepts a slice of Patch operations (Insertion, Append, Replacement or Removal) and +// updates an existing CDN service using the values provided. idOrURL can be either the service's +// URL or its ID. For example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. +func Update(c *gophercloud.ServiceClient, idOrURL string, opts UpdateOpts) (r UpdateResult) { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = updateURL(c, idOrURL) + } + + b := make([]map[string]interface{}, len(opts)) + for i, patch := range opts { + b[i] = patch.ToCDNServiceUpdateMap() + } + + resp, err := c.Request("PATCH", url, &gophercloud.RequestOpts{ + JSONBody: &b, + OkCodes: []int{202}, + }) + r.Header = resp.Header + r.Err = err + return +} + +// Delete accepts a service's ID or its URL and deletes the CDN service +// associated with it. For example, both "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" and +// "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" +// are valid options for idOrURL. 
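+// An illustrative usage sketch, not a definitive example: it assumes an
+// already-authenticated CDN v1 *gophercloud.ServiceClient, called client here,
+// and a placeholder service ID.
+//
+//	err := services.Delete(client, "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").ExtractErr()
+//	if err != nil {
+//		// handle the error
+//	}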
+func Delete(c *gophercloud.ServiceClient, idOrURL string) (r DeleteResult) { + var url string + if strings.Contains(idOrURL, "/") { + url = idOrURL + } else { + url = deleteURL(c, idOrURL) + } + _, r.Err = c.Delete(url, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/results.go new file mode 100644 index 000000000000..f9a1caae73c3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/results.go @@ -0,0 +1,304 @@ +package services + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Domain represents a domain used by users to access their website. +type Domain struct { + // Specifies the domain used to access the assets on their website, for which + // a CNAME is given to the CDN provider. + Domain string `json:"domain" required:"true"` + // Specifies the protocol used to access the assets on this domain. Only "http" + // or "https" are currently allowed. The default is "http". + Protocol string `json:"protocol,omitempty"` +} + +func (d Domain) toPatchValue() interface{} { + r := make(map[string]interface{}) + r["domain"] = d.Domain + if d.Protocol != "" { + r["protocol"] = d.Protocol + } + return r +} + +func (d Domain) appropriatePath() Path { + return PathDomains +} + +func (d Domain) renderRootOr(render func(p Path) string) string { + return render(d.appropriatePath()) +} + +// DomainList provides a useful way to perform bulk operations in a single Patch. +type DomainList []Domain + +func (list DomainList) toPatchValue() interface{} { + r := make([]interface{}, len(list)) + for i, domain := range list { + r[i] = domain.toPatchValue() + } + return r +} + +func (list DomainList) appropriatePath() Path { + return PathDomains +} + +func (list DomainList) renderRootOr(_ func(p Path) string) string { + return list.appropriatePath().renderRoot() +} + +// OriginRule represents a rule that defines when an origin should be accessed. +type OriginRule struct { + // Specifies the name of this rule. + Name string `json:"name" required:"true"` + // Specifies the request URL this rule should match for this origin to be used. Regex is supported. + RequestURL string `json:"request_url" required:"true"` +} + +// Origin specifies a list of origin domains or IP addresses where the original assets are stored. +type Origin struct { + // Specifies the URL or IP address to pull origin content from. + Origin string `json:"origin" required:"true"` + // Specifies the port used to access the origin. The default is port 80. + Port int `json:"port,omitempty"` + // Specifies whether or not to use HTTPS to access the origin. The default + // is false. + SSL bool `json:"ssl"` + // Specifies a collection of rules that define the conditions when this origin + // should be accessed. If there is more than one origin, the rules parameter is required. 
+ Rules []OriginRule `json:"rules,omitempty"` +} + +func (o Origin) toPatchValue() interface{} { + r := make(map[string]interface{}) + r["origin"] = o.Origin + r["port"] = o.Port + r["ssl"] = o.SSL + if len(o.Rules) > 0 { + r["rules"] = make([]map[string]interface{}, len(o.Rules)) + for index, rule := range o.Rules { + submap := r["rules"].([]map[string]interface{})[index] + submap["name"] = rule.Name + submap["request_url"] = rule.RequestURL + } + } + return r +} + +func (o Origin) appropriatePath() Path { + return PathOrigins +} + +func (o Origin) renderRootOr(render func(p Path) string) string { + return render(o.appropriatePath()) +} + +// OriginList provides a useful way to perform bulk operations in a single Patch. +type OriginList []Origin + +func (list OriginList) toPatchValue() interface{} { + r := make([]interface{}, len(list)) + for i, origin := range list { + r[i] = origin.toPatchValue() + } + return r +} + +func (list OriginList) appropriatePath() Path { + return PathOrigins +} + +func (list OriginList) renderRootOr(_ func(p Path) string) string { + return list.appropriatePath().renderRoot() +} + +// TTLRule specifies a rule that determines if a TTL should be applied to an asset. +type TTLRule struct { + // Specifies the name of this rule. + Name string `json:"name" required:"true"` + // Specifies the request URL this rule should match for this TTL to be used. Regex is supported. + RequestURL string `json:"request_url" required:"true"` +} + +// CacheRule specifies the TTL rules for the assets under this service. +type CacheRule struct { + // Specifies the name of this caching rule. Note: 'default' is a reserved name used for the default TTL setting. + Name string `json:"name" required:"true"` + // Specifies the TTL to apply. + TTL int `json:"ttl,omitempty"` + // Specifies a collection of rules that determine if this TTL should be applied to an asset. + Rules []TTLRule `json:"rules,omitempty"` +} + +func (c CacheRule) toPatchValue() interface{} { + r := make(map[string]interface{}) + r["name"] = c.Name + r["ttl"] = c.TTL + r["rules"] = make([]map[string]interface{}, len(c.Rules)) + for index, rule := range c.Rules { + submap := r["rules"].([]map[string]interface{})[index] + submap["name"] = rule.Name + submap["request_url"] = rule.RequestURL + } + return r +} + +func (c CacheRule) appropriatePath() Path { + return PathCaching +} + +func (c CacheRule) renderRootOr(render func(p Path) string) string { + return render(c.appropriatePath()) +} + +// CacheRuleList provides a useful way to perform bulk operations in a single Patch. +type CacheRuleList []CacheRule + +func (list CacheRuleList) toPatchValue() interface{} { + r := make([]interface{}, len(list)) + for i, rule := range list { + r[i] = rule.toPatchValue() + } + return r +} + +func (list CacheRuleList) appropriatePath() Path { + return PathCaching +} + +func (list CacheRuleList) renderRootOr(_ func(p Path) string) string { + return list.appropriatePath().renderRoot() +} + +// RestrictionRule specifies a rule that determines if this restriction should be applied to an asset. +type RestrictionRule struct { + // Specifies the name of this rule. + Name string `json:"name" required:"true"` + // Specifies the http host that requests must come from. + Referrer string `json:"referrer,omitempty"` +} + +// Restriction specifies a restriction that defines who can access assets (content from the CDN cache). +type Restriction struct { + // Specifies the name of this restriction. 
+ Name string `json:"name" required:"true"` + // Specifies a collection of rules that determine if this TTL should be applied to an asset. + Rules []RestrictionRule `json:"rules,omitempty"` +} + +// Error specifies an error that occurred during the previous service action. +type Error struct { + // Specifies an error message detailing why there is an error. + Message string `json:"message"` +} + +// Service represents a CDN service resource. +type Service struct { + // Specifies the service ID that represents distributed content. The value is + // a UUID, such as 96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0, that is generated by the server. + ID string `json:"id"` + // Specifies the name of the service. + Name string `json:"name"` + // Specifies a list of domains used by users to access their website. + Domains []Domain `json:"domains"` + // Specifies a list of origin domains or IP addresses where the original assets are stored. + Origins []Origin `json:"origins"` + // Specifies the TTL rules for the assets under this service. Supports wildcards for fine grained control. + Caching []CacheRule `json:"caching"` + // Specifies the restrictions that define who can access assets (content from the CDN cache). + Restrictions []Restriction `json:"restrictions"` + // Specifies the CDN provider flavor ID to use. For a list of flavors, see the operation to list the available flavors. + FlavorID string `json:"flavor_id"` + // Specifies the current status of the service. + Status string `json:"status"` + // Specifies the list of errors that occurred during the previous service action. + Errors []Error `json:"errors"` + // Specifies the self-navigating JSON document paths. + Links []gophercloud.Link `json:"links"` +} + +// ServicePage is the page returned by a pager when traversing over a +// collection of CDN services. +type ServicePage struct { + pagination.MarkerPageBase +} + +// IsEmpty returns true if a ListResult contains no services. +func (r ServicePage) IsEmpty() (bool, error) { + services, err := ExtractServices(r) + return len(services) == 0, err +} + +// LastMarker returns the last service in a ListResult. +func (r ServicePage) LastMarker() (string, error) { + services, err := ExtractServices(r) + if err != nil { + return "", err + } + if len(services) == 0 { + return "", nil + } + return (services[len(services)-1]).ID, nil +} + +// ExtractServices is a function that takes a ListResult and returns the services' information. +func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Services []Service `json:"services"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Services, err +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.Result +} + +// Extract is a method that extracts the location of a newly created service. +func (r CreateResult) Extract() (string, error) { + if r.Err != nil { + return "", r.Err + } + if l, ok := r.Header["Location"]; ok && len(l) > 0 { + return l[0], nil + } + return "", nil +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that extracts a service from a GetResult. +func (r GetResult) Extract() (*Service, error) { + var s Service + err := r.ExtractInto(&s) + return &s, err +} + +// UpdateResult represents the result of a Update operation. +type UpdateResult struct { + gophercloud.Result +} + +// Extract is a method that extracts the location of an updated service. 
+func (r UpdateResult) Extract() (string, error) { + if r.Err != nil { + return "", r.Err + } + if l, ok := r.Header["Location"]; ok && len(l) > 0 { + return l[0], nil + } + return "", nil +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/doc.go new file mode 100644 index 000000000000..c72e391afe3f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/doc.go @@ -0,0 +1,2 @@ +// cdn_services_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/fixtures.go new file mode 100644 index 000000000000..d4093e051510 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/fixtures.go @@ -0,0 +1,372 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleListCDNServiceSuccessfully creates an HTTP handler at `/services` on the test handler mux +// that responds with a `List` response. +func HandleListCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "links": [ + { + "rel": "next", + "href": "https://www.poppycdn.io/v1.0/services?marker=96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0&limit=20" + } + ], + "services": [ + { + "id": "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "name": "mywebsite.com", + "domains": [ + { + "domain": "www.mywebsite.com" + } + ], + "origins": [ + { + "origin": "mywebsite.com", + "port": 80, + "ssl": false + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + }, + { + "name": "home", + "ttl": 17200, + "rules": [ + { + "name": "index", + "request_url": "/index.htm" + } + ] + }, + { + "name": "images", + "ttl": 12800, + "rules": [ + { + "name": "images", + "request_url": "*.png" + } + ] + } + ], + "restrictions": [ + { + "name": "website only", + "rules": [ + { + "name": "mywebsite.com", + "referrer": "www.mywebsite.com" + } + ] + } + ], + "flavor_id": "asia", + "status": "deployed", + "errors" : [], + "links": [ + { + "href": "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "rel": "self" + }, + { + "href": "mywebsite.com.cdn123.poppycdn.net", + "rel": "access_url" + }, + { + "href": "https://www.poppycdn.io/v1.0/flavors/asia", + "rel": "flavor" + } + ] + }, + { + "id": "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + "name": "myothersite.com", + "domains": [ + { + "domain": "www.myothersite.com" + } + ], + "origins": [ + { + "origin": "44.33.22.11", + "port": 80, + "ssl": false + }, + { + "origin": "77.66.55.44", + "port": 80, + "ssl": false, + "rules": [ + { + "name": "videos", + "request_url": "^/videos/*.m3u" + } + ] + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + } + ], + "restrictions": [ + {} + ], + "flavor_id": "europe", + "status": "deployed", + "links": [ + { + "href": 
"https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + "rel": "self" + }, + { + "href": "myothersite.com.poppycdn.net", + "rel": "access_url" + }, + { + "href": "https://www.poppycdn.io/v1.0/flavors/europe", + "rel": "flavor" + } + ] + } + ] + } + `) + case "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1": + fmt.Fprintf(w, `{ + "services": [] + }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleCreateCDNServiceSuccessfully creates an HTTP handler at `/services` on the test handler mux +// that responds with a `Create` response. +func HandleCreateCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, ` + { + "name": "mywebsite.com", + "domains": [ + { + "domain": "www.mywebsite.com" + }, + { + "domain": "blog.mywebsite.com" + } + ], + "origins": [ + { + "origin": "mywebsite.com", + "port": 80, + "ssl": false + } + ], + "restrictions": [ + { + "name": "website only", + "rules": [ + { + "name": "mywebsite.com", + "referrer": "www.mywebsite.com" + } + ] + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + } + ], + + "flavor_id": "cdn" + } + `) + w.Header().Add("Location", "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleGetCDNServiceSuccessfully creates an HTTP handler at `/services/{id}` on the test handler mux +// that responds with a `Get` response. +func HandleGetCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "id": "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "name": "mywebsite.com", + "domains": [ + { + "domain": "www.mywebsite.com", + "protocol": "http" + } + ], + "origins": [ + { + "origin": "mywebsite.com", + "port": 80, + "ssl": false + } + ], + "caching": [ + { + "name": "default", + "ttl": 3600 + }, + { + "name": "home", + "ttl": 17200, + "rules": [ + { + "name": "index", + "request_url": "/index.htm" + } + ] + }, + { + "name": "images", + "ttl": 12800, + "rules": [ + { + "name": "images", + "request_url": "*.png" + } + ] + } + ], + "restrictions": [ + { + "name": "website only", + "rules": [ + { + "name": "mywebsite.com", + "referrer": "www.mywebsite.com" + } + ] + } + ], + "flavor_id": "cdn", + "status": "deployed", + "errors" : [], + "links": [ + { + "href": "https://global.cdn.api.rackspacecloud.com/v1.0/110011/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + "rel": "self" + }, + { + "href": "blog.mywebsite.com.cdn1.raxcdn.com", + "rel": "access_url" + }, + { + "href": "https://global.cdn.api.rackspacecloud.com/v1.0/110011/flavors/cdn", + "rel": "flavor" + } + ] + } + `) + }) +} + +// HandleUpdateCDNServiceSuccessfully creates an HTTP handler at `/services/{id}` on the test handler mux +// that responds with a `Update` response. 
+func HandleUpdateCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, ` + [ + { + "op": "add", + "path": "/domains/-", + "value": {"domain": "appended.mocksite4.com"} + }, + { + "op": "add", + "path": "/domains/4", + "value": {"domain": "inserted.mocksite4.com"} + }, + { + "op": "add", + "path": "/domains", + "value": [ + {"domain": "bulkadded1.mocksite4.com"}, + {"domain": "bulkadded2.mocksite4.com"} + ] + }, + { + "op": "replace", + "path": "/origins/2", + "value": {"origin": "44.33.22.11", "port": 80, "ssl": false} + }, + { + "op": "replace", + "path": "/origins", + "value": [ + {"origin": "44.33.22.11", "port": 80, "ssl": false}, + {"origin": "55.44.33.22", "port": 443, "ssl": true} + ] + }, + { + "op": "remove", + "path": "/caching/8" + }, + { + "op": "remove", + "path": "/caching" + }, + { + "op": "replace", + "path": "/name", + "value": "differentServiceName" + } + ] + `) + w.Header().Add("Location", "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleDeleteCDNServiceSuccessfully creates an HTTP handler at `/services/{id}` on the test handler mux +// that responds with a `Delete` response. +func HandleDeleteCDNServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/requests_test.go new file mode 100644 index 000000000000..0abc98ef3d9f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/testing/requests_test.go @@ -0,0 +1,359 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/cdn/v1/services" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListCDNServiceSuccessfully(t) + + count := 0 + + err := services.List(fake.ServiceClient(), &services.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := services.ExtractServices(page) + if err != nil { + t.Errorf("Failed to extract services: %v", err) + return false, err + } + + expected := []services.Service{ + { + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Name: "mywebsite.com", + Domains: []services.Domain{ + { + Domain: "www.mywebsite.com", + }, + }, + Origins: []services.Origin{ + { + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Caching: []services.CacheRule{ + { + Name: "default", + TTL: 3600, + }, + { + Name: "home", + TTL: 17200, + Rules: []services.TTLRule{ + { + Name: "index", + RequestURL: "/index.htm", + }, + }, + }, + { + Name: "images", + TTL: 12800, + Rules: []services.TTLRule{ + { + Name: "images", + RequestURL: "*.png", + }, + }, + }, + }, + Restrictions: []services.Restriction{ + { + Name: "website only", + Rules: []services.RestrictionRule{ + { + Name: 
"mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + FlavorID: "asia", + Status: "deployed", + Errors: []services.Error{}, + Links: []gophercloud.Link{ + { + Href: "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Rel: "self", + }, + { + Href: "mywebsite.com.cdn123.poppycdn.net", + Rel: "access_url", + }, + { + Href: "https://www.poppycdn.io/v1.0/flavors/asia", + Rel: "flavor", + }, + }, + }, + { + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + Name: "myothersite.com", + Domains: []services.Domain{ + { + Domain: "www.myothersite.com", + }, + }, + Origins: []services.Origin{ + { + Origin: "44.33.22.11", + Port: 80, + SSL: false, + }, + { + Origin: "77.66.55.44", + Port: 80, + SSL: false, + Rules: []services.OriginRule{ + { + Name: "videos", + RequestURL: "^/videos/*.m3u", + }, + }, + }, + }, + Caching: []services.CacheRule{ + { + Name: "default", + TTL: 3600, + }, + }, + Restrictions: []services.Restriction{}, + FlavorID: "europe", + Status: "deployed", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f1", + Rel: "self", + }, + gophercloud.Link{ + Href: "myothersite.com.poppycdn.net", + Rel: "access_url", + }, + gophercloud.Link{ + Href: "https://www.poppycdn.io/v1.0/flavors/europe", + Rel: "flavor", + }, + }, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateCDNServiceSuccessfully(t) + + createOpts := services.CreateOpts{ + Name: "mywebsite.com", + Domains: []services.Domain{ + { + Domain: "www.mywebsite.com", + }, + { + Domain: "blog.mywebsite.com", + }, + }, + Origins: []services.Origin{ + { + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Restrictions: []services.Restriction{ + { + Name: "website only", + Rules: []services.RestrictionRule{ + { + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + Caching: []services.CacheRule{ + { + Name: "default", + TTL: 3600, + }, + }, + FlavorID: "cdn", + } + + expected := "https://global.cdn.api.rackspacecloud.com/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" + actual, err := services.Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, expected, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetCDNServiceSuccessfully(t) + + expected := &services.Service{ + ID: "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Name: "mywebsite.com", + Domains: []services.Domain{ + { + Domain: "www.mywebsite.com", + Protocol: "http", + }, + }, + Origins: []services.Origin{ + { + Origin: "mywebsite.com", + Port: 80, + SSL: false, + }, + }, + Caching: []services.CacheRule{ + { + Name: "default", + TTL: 3600, + }, + { + Name: "home", + TTL: 17200, + Rules: []services.TTLRule{ + { + Name: "index", + RequestURL: "/index.htm", + }, + }, + }, + { + Name: "images", + TTL: 12800, + Rules: []services.TTLRule{ + { + Name: "images", + RequestURL: "*.png", + }, + }, + }, + }, + Restrictions: []services.Restriction{ + { + Name: "website only", + Rules: []services.RestrictionRule{ + { + Name: "mywebsite.com", + Referrer: "www.mywebsite.com", + }, + }, + }, + }, + FlavorID: "cdn", + Status: "deployed", + Errors: []services.Error{}, + Links: []gophercloud.Link{ + { + Href: 
"https://global.cdn.api.rackspacecloud.com/v1.0/110011/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", + Rel: "self", + }, + { + Href: "blog.mywebsite.com.cdn1.raxcdn.com", + Rel: "access_url", + }, + { + Href: "https://global.cdn.api.rackspacecloud.com/v1.0/110011/flavors/cdn", + Rel: "flavor", + }, + }, + } + + actual, err := services.Get(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestSuccessfulUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateCDNServiceSuccessfully(t) + + expected := "https://www.poppycdn.io/v1.0/services/96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0" + ops := services.UpdateOpts{ + // Append a single Domain + services.Append{Value: services.Domain{Domain: "appended.mocksite4.com"}}, + // Insert a single Domain + services.Insertion{ + Index: 4, + Value: services.Domain{Domain: "inserted.mocksite4.com"}, + }, + // Bulk addition + services.Append{ + Value: services.DomainList{ + {Domain: "bulkadded1.mocksite4.com"}, + {Domain: "bulkadded2.mocksite4.com"}, + }, + }, + // Replace a single Origin + services.Replacement{ + Index: 2, + Value: services.Origin{Origin: "44.33.22.11", Port: 80, SSL: false}, + }, + // Bulk replace Origins + services.Replacement{ + Index: 0, // Ignored + Value: services.OriginList{ + {Origin: "44.33.22.11", Port: 80, SSL: false}, + {Origin: "55.44.33.22", Port: 443, SSL: true}, + }, + }, + // Remove a single CacheRule + services.Removal{ + Index: 8, + Path: services.PathCaching, + }, + // Bulk removal + services.Removal{ + All: true, + Path: services.PathCaching, + }, + // Service name replacement + services.NameReplacement{ + NewName: "differentServiceName", + }, + } + + actual, err := services.Update(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0", ops).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, expected, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteCDNServiceSuccessfully(t) + + err := services.Delete(fake.ServiceClient(), "96737ae3-cfc1-4c72-be88-5d0e7cc9a3f0").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/urls.go new file mode 100644 index 000000000000..5bb3ca9d9252 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/cdn/v1/services/urls.go @@ -0,0 +1,23 @@ +package services + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("services") +} + +func createURL(c *gophercloud.ServiceClient) string { + return listURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("services", id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/client.go new file mode 100644 index 000000000000..6d93af5b4175 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/client.go @@ -0,0 +1,427 @@ +package openstack + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 
"github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +const ( + // v2 represents Keystone v2. + // It should never increase beyond 2.0. + v2 = "v2.0" + + // v3 represents Keystone v3. + // The version can be anything from v3 to v3.x. + v3 = "v3" +) + +/* +NewClient prepares an unauthenticated ProviderClient instance. +Most users will probably prefer using the AuthenticatedClient function +instead. + +This is useful if you wish to explicitly control the version of the identity +service that's used for authentication explicitly, for example. + +A basic example of using this would be: + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.NewClient(ao.IdentityEndpoint) + client, err := openstack.NewIdentityV3(provider, gophercloud.EndpointOpts{}) +*/ +func NewClient(endpoint string) (*gophercloud.ProviderClient, error) { + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err + } + + endpoint = gophercloud.NormalizeURL(endpoint) + base = gophercloud.NormalizeURL(base) + + p := new(gophercloud.ProviderClient) + p.IdentityBase = base + p.IdentityEndpoint = endpoint + p.UseTokenLock() + + return p, nil +} + +/* +AuthenticatedClient logs in to an OpenStack cloud found at the identity endpoint +specified by the options, acquires a token, and returns a Provider Client +instance that's ready to operate. + +If the full path to a versioned identity endpoint was specified (example: +http://example.com:5000/v3), that path will be used as the endpoint to query. + +If a versionless endpoint was specified (example: http://example.com:5000/), +the endpoint will be queried to determine which versions of the identity service +are available, then chooses the most recent or most supported version. + +Example: + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(ao) + client, err := openstack.NewNetworkV2(client, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +*/ +func AuthenticatedClient(options gophercloud.AuthOptions) (*gophercloud.ProviderClient, error) { + client, err := NewClient(options.IdentityEndpoint) + if err != nil { + return nil, err + } + + err = Authenticate(client, options) + if err != nil { + return nil, err + } + return client, nil +} + +// Authenticate or re-authenticate against the most recent identity service +// supported at the provided endpoint. +func Authenticate(client *gophercloud.ProviderClient, options gophercloud.AuthOptions) error { + versions := []*utils.Version{ + {ID: v2, Priority: 20, Suffix: "/v2.0/"}, + {ID: v3, Priority: 30, Suffix: "/v3/"}, + } + + chosen, endpoint, err := utils.ChooseVersion(client, versions) + if err != nil { + return err + } + + switch chosen.ID { + case v2: + return v2auth(client, endpoint, options, gophercloud.EndpointOpts{}) + case v3: + return v3auth(client, endpoint, &options, gophercloud.EndpointOpts{}) + default: + // The switch statement must be out of date from the versions list. + return fmt.Errorf("Unrecognized identity version: %s", chosen.ID) + } +} + +// AuthenticateV2 explicitly authenticates against the identity v2 endpoint. 
+func AuthenticateV2(client *gophercloud.ProviderClient, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + return v2auth(client, "", options, eo) +} + +func v2auth(client *gophercloud.ProviderClient, endpoint string, options gophercloud.AuthOptions, eo gophercloud.EndpointOpts) error { + v2Client, err := NewIdentityV2(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v2Client.Endpoint = endpoint + } + + v2Opts := tokens2.AuthOptions{ + IdentityEndpoint: options.IdentityEndpoint, + Username: options.Username, + Password: options.Password, + TenantID: options.TenantID, + TenantName: options.TenantName, + AllowReauth: options.AllowReauth, + TokenID: options.TokenID, + } + + result := tokens2.Create(v2Client, v2Opts) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + if options.AllowReauth { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + tao := options + tao.AllowReauth = false + client.ReauthFunc = func() error { + err := v2auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil + } + } + client.TokenID = token.ID + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V2EndpointURL(catalog, opts) + } + + return nil +} + +// AuthenticateV3 explicitly authenticates against the identity v3 service. +func AuthenticateV3(client *gophercloud.ProviderClient, options tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + return v3auth(client, "", options, eo) +} + +func v3auth(client *gophercloud.ProviderClient, endpoint string, opts tokens3.AuthOptionsBuilder, eo gophercloud.EndpointOpts) error { + // Override the generated service endpoint with the one returned by the version endpoint. + v3Client, err := NewIdentityV3(client, eo) + if err != nil { + return err + } + + if endpoint != "" { + v3Client.Endpoint = endpoint + } + + result := tokens3.Create(v3Client, opts) + + token, err := result.ExtractToken() + if err != nil { + return err + } + + catalog, err := result.ExtractServiceCatalog() + if err != nil { + return err + } + + client.TokenID = token.ID + + if opts.CanReauth() { + // here we're creating a throw-away client (tac). it's a copy of the user's provider client, but + // with the token and reauth func zeroed out. combined with setting `AllowReauth` to `false`, + // this should retry authentication only once + tac := *client + tac.ReauthFunc = nil + tac.TokenID = "" + var tao tokens3.AuthOptionsBuilder + switch ot := opts.(type) { + case *gophercloud.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + case *tokens3.AuthOptions: + o := *ot + o.AllowReauth = false + tao = &o + default: + tao = opts + } + client.ReauthFunc = func() error { + err := v3auth(&tac, endpoint, tao, eo) + if err != nil { + return err + } + client.TokenID = tac.TokenID + return nil + } + } + client.EndpointLocator = func(opts gophercloud.EndpointOpts) (string, error) { + return V3EndpointURL(catalog, opts) + } + + return nil +} + +// NewIdentityV2 creates a ServiceClient that may be used to interact with the +// v2 identity service. 
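The re-authentication wiring in v2auth/v3auth above is driven entirely by AllowReauth on the caller's AuthOptions: when set, an expired token is refreshed through the installed ReauthFunc, which retries authentication once against a throw-away copy of the provider client. A hedged sketch with placeholder endpoint and credentials:

package main

import (
	"log"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack"
)

func main() {
	ao := gophercloud.AuthOptions{
		IdentityEndpoint: "https://example.com:5000/v3/", // placeholder endpoint
		Username:         "demo",                         // placeholder credentials
		Password:         "secret",
		DomainName:       "Default", // usually required for Keystone v3
		// AllowReauth makes v2auth/v3auth install client.ReauthFunc, so an
		// expired token is refreshed transparently, with at most one retry.
		AllowReauth: true,
	}

	provider, err := openstack.AuthenticatedClient(ao)
	if err != nil {
		log.Fatal(err)
	}
	_ = provider
}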
+func NewIdentityV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v2.0/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +// NewIdentityV3 creates a ServiceClient that may be used to access the v3 +// identity service. +func NewIdentityV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + endpoint := client.IdentityBase + "v3/" + clientType := "identity" + var err error + if !reflect.DeepEqual(eo, gophercloud.EndpointOpts{}) { + eo.ApplyDefaults(clientType) + endpoint, err = client.EndpointLocator(eo) + if err != nil { + return nil, err + } + } + + // Ensure endpoint still has a suffix of v3. + // This is because EndpointLocator might have found a versionless + // endpoint or the published endpoint is still /v2.0. In both + // cases, we need to fix the endpoint to point to /v3. + base, err := utils.BaseEndpoint(endpoint) + if err != nil { + return nil, err + } + + base = gophercloud.NormalizeURL(base) + + endpoint = base + "v3/" + + return &gophercloud.ServiceClient{ + ProviderClient: client, + Endpoint: endpoint, + Type: clientType, + }, nil +} + +func initClientOpts(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, clientType string) (*gophercloud.ServiceClient, error) { + sc := new(gophercloud.ServiceClient) + eo.ApplyDefaults(clientType) + url, err := client.EndpointLocator(eo) + if err != nil { + return sc, err + } + sc.ProviderClient = client + sc.Endpoint = url + sc.Type = clientType + return sc, nil +} + +// NewObjectStorageV1 creates a ServiceClient that may be used with the v1 +// object storage package. +func NewObjectStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "object-store") +} + +// NewComputeV2 creates a ServiceClient that may be used with the v2 compute +// package. +func NewComputeV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "compute") +} + +// NewNetworkV2 creates a ServiceClient that may be used with the v2 network +// package. +func NewNetworkV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "network") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewBlockStorageV1 creates a ServiceClient that may be used to access the v1 +// block storage service. +func NewBlockStorageV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volume") +} + +// NewBlockStorageV2 creates a ServiceClient that may be used to access the v2 +// block storage service. +func NewBlockStorageV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev2") +} + +// NewBlockStorageV3 creates a ServiceClient that may be used to access the v3 block storage service. 
+func NewBlockStorageV3(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "volumev3") +} + +// NewSharedFileSystemV2 creates a ServiceClient that may be used to access the v2 shared file system service. +func NewSharedFileSystemV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "sharev2") +} + +// NewCDNV1 creates a ServiceClient that may be used to access the OpenStack v1 +// CDN service. +func NewCDNV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "cdn") +} + +// NewOrchestrationV1 creates a ServiceClient that may be used to access the v1 +// orchestration service. +func NewOrchestrationV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "orchestration") +} + +// NewDBV1 creates a ServiceClient that may be used to access the v1 DB service. +func NewDBV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "database") +} + +// NewDNSV2 creates a ServiceClient that may be used to access the v2 DNS +// service. +func NewDNSV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "dns") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewImageServiceV2 creates a ServiceClient that may be used to access the v2 +// image service. +func NewImageServiceV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "image") + sc.ResourceBase = sc.Endpoint + "v2/" + return sc, err +} + +// NewLoadBalancerV2 creates a ServiceClient that may be used to access the v2 +// load balancer service. +func NewLoadBalancerV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "load-balancer") + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc, err +} + +// NewClusteringV1 creates a ServiceClient that may be used with the v1 clustering +// package. +func NewClusteringV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "clustering") +} + +// NewMessagingV2 creates a ServiceClient that may be used with the v2 messaging +// service. +func NewMessagingV2(client *gophercloud.ProviderClient, clientID string, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "messaging") + sc.MoreHeaders = map[string]string{"Client-ID": clientID} + return sc, err +} + +// NewContainerV1 creates a ServiceClient that may be used with v1 container package +func NewContainerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container") +} + +// NewKeyManagerV1 creates a ServiceClient that may be used with the v1 key +// manager service. 
+func NewKeyManagerV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + sc, err := initClientOpts(client, eo, "key-manager") + sc.ResourceBase = sc.Endpoint + "v1/" + return sc, err +} + +// NewContainerInfraV1 creates a ServiceClient that may be used with the v1 container infra management +// package. +func NewContainerInfraV1(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "container-infra") +} + +// NewWorkflowV2 creates a ServiceClient that may be used with the v2 workflow management package. +func NewWorkflowV2(client *gophercloud.ProviderClient, eo gophercloud.EndpointOpts) (*gophercloud.ServiceClient, error) { + return initClientOpts(client, eo, "workflowv2") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/doc.go new file mode 100644 index 000000000000..2312a512f356 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/doc.go @@ -0,0 +1,33 @@ +/* +Package actions provides listing and retrieving of senlin actions for the +OpenStack Clustering Service. + +Example to List Actions + + opts := actions.ListOpts{ + Limit: 5, + } + + err = actions.List(serviceClient, opts).EachPage(func(page pagination.Page) (bool, error) { + actionInfos, err := actions.ExtractActions(page) + if err != nil { + return false, err + } + + for _, actionInfo := range actionInfos { + fmt.Println("%+v\n", actionInfo) + } + return true, nil + }) + +Example to Get an Action + + actionID := "edce3528-864f-41fb-8759-f4707925cc09" + action, err := actions.Get(serviceClient, actionID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("Action %+v: ", action) +*/ +package actions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/requests.go new file mode 100644 index 000000000000..f1a4f43f3ec2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/requests.go @@ -0,0 +1,51 @@ +package actions + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToActionListQuery() (string, error) +} + +// ListOpts represents options used to list actions. +type ListOpts struct { + Limit int `q:"limit"` + Marker string `q:"marker"` + Sort string `q:"sort"` + GlobalProject *bool `q:"global_project"` + Name string `q:"name"` + Target string `q:"target"` + Action string `q:"action"` + Status string `q:"status"` +} + +// ToClusterListQuery builds a query string from ListOpts. +func (opts ListOpts) ToActionListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List instructs OpenStack to provide a list of actions. 
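A typical caller drives the List request below through EachPage, extracting each page with ExtractActions from this package's results helpers. A brief usage sketch with placeholder filter values:

package clusteringexamples

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/clustering/v1/actions"
	"github.com/gophercloud/gophercloud/pagination"
)

// listSucceededActions prints every SUCCEEDED action that targets the given
// cluster or node ID, walking the collection page by page.
func listSucceededActions(client *gophercloud.ServiceClient, targetID string) error {
	opts := actions.ListOpts{
		Status: "SUCCEEDED", // becomes ?status=SUCCEEDED via ToActionListQuery
		Target: targetID,
		Limit:  20,
	}

	return actions.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {
		acts, err := actions.ExtractActions(page)
		if err != nil {
			return false, err
		}
		for _, a := range acts {
			fmt.Printf("%s %s %s\n", a.ID, a.Name, a.Status)
		}
		return true, nil // continue to the next page, if any
	})
}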
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToActionListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ActionPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details of a single action. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/results.go new file mode 100644 index 000000000000..ca03a4b71c89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/results.go @@ -0,0 +1,106 @@ +package actions + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Action represents a detailed Action. +type Action struct { + Action string `json:"action"` + Cause string `json:"cause"` + CreatedAt time.Time `json:"-"` + Data map[string]interface{} `json:"data"` + DependedBy []string `json:"depended_by"` + DependsOn []string `json:"depends_on"` + StartTime float64 `json:"start_time"` + EndTime float64 `json:"end_time"` + ID string `json:"id"` + Inputs map[string]interface{} `json:"inputs"` + Interval int `json:"interval"` + Name string `json:"name"` + Outputs map[string]interface{} `json:"outputs"` + Owner string `json:"owner"` + Project string `json:"project"` + Status string `json:"status"` + StatusReason string `json:"status_reason"` + Target string `json:"target"` + Timeout int `json:"timeout"` + UpdatedAt time.Time `json:"-"` + User string `json:"user"` +} + +func (r *Action) UnmarshalJSON(b []byte) error { + type tmp Action + var s struct { + tmp + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Action(s.tmp) + + if s.CreatedAt != "" { + r.CreatedAt, err = time.Parse(time.RFC3339, s.CreatedAt) + if err != nil { + return err + } + } + + if s.UpdatedAt != "" { + r.UpdatedAt, err = time.Parse(time.RFC3339, s.UpdatedAt) + if err != nil { + return err + } + } + + return nil +} + +// commonResult is the response of a base result. +type commonResult struct { + gophercloud.Result +} + +// Extract interprets any commonResult-based result as an Action. +func (r commonResult) Extract() (*Action, error) { + var s struct { + Action *Action `json:"action"` + } + err := r.ExtractInto(&s) + return s.Action, err +} + +// GetResult is the response of a Get operations. Call its Extract method to +// interpret it as an Action. +type GetResult struct { + commonResult +} + +// ActionPage contains a single page of all actions from a List call. +type ActionPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a ActionPage contains any results. +func (r ActionPage) IsEmpty() (bool, error) { + actions, err := ExtractActions(r) + return len(actions) == 0, err +} + +// ExtractActions returns a slice of Actions from the List operation. 
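The custom UnmarshalJSON above is why callers can rely on zero time.Time values for missing timestamps: RFC3339 strings are parsed, while empty strings are skipped. A small decoding sketch with an abbreviated payload:

package clusteringexamples

import (
	"encoding/json"
	"fmt"

	"github.com/gophercloud/gophercloud/openstack/clustering/v1/actions"
)

// decodeAction shows how Action.UnmarshalJSON turns RFC3339 strings into
// time.Time values and leaves empty or missing timestamps at the zero value.
func decodeAction() error {
	raw := []byte(`{
		"action": "NODE_DELETE",
		"created_at": "2015-11-04T05:21:41Z",
		"updated_at": ""
	}`)

	var a actions.Action
	if err := json.Unmarshal(raw, &a); err != nil {
		return err
	}

	fmt.Println(a.CreatedAt)          // 2015-11-04 05:21:41 +0000 UTC
	fmt.Println(a.UpdatedAt.IsZero()) // true: the empty string is skipped
	return nil
}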
+func ExtractActions(r pagination.Page) ([]Action, error) { + var s struct { + Actions []Action `json:"actions"` + } + err := (r.(ActionPage)).ExtractInto(&s) + return s.Actions, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/doc.go new file mode 100644 index 000000000000..ffecf4d863e8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_actions_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/fixtures.go new file mode 100644 index 000000000000..4709b7c51630 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/fixtures.go @@ -0,0 +1,166 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/actions" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ListResponse = ` +{ + "actions": [ + { + "action": "NODE_DELETE", + "cause": "RPC Request", + "created_at": "2015-11-04T05:21:41Z", + "data": {}, + "depended_by": ["ef67fe80-6547-40f2-ba1b-83e950aa38df"], + "depends_on": ["ef67fe80-6547-40f2-ba1b-83e950aa38df"], + "end_time": 1425550000.0, + "id": "edce3528-864f-41fb-8759-f4707925cc09", + "inputs": {}, + "interval": -1, + "name": "node_delete_f0de9b9c", + "outputs": {}, + "owner": null, + "project": "f1fe61dcda2f4618a14c10dc7abc214d", + "start_time": 1425550000.0, + "status": "SUCCEEDED", + "status_reason": "Action completed successfully.", + "target": "f0de9b9c-6d48-4a46-af21-2ca8607777fe", + "timeout": 3600, + "updated_at": "2016-11-04T05:21:41Z", + "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" + }, + { + "action": "NODE_DELETE", + "cause": "RPC Request", + "created_at": null, + "data": {}, + "depended_by": ["ef67fe80-6547-40f2-ba1b-83e950aa38df"], + "depends_on": ["ef67fe80-6547-40f2-ba1b-83e950aa38df"], + "end_time": 1425550000.0, + "id": "edce3528-864f-41fb-8759-f4707925cc09", + "inputs": {}, + "interval": -1, + "name": "node_delete_f0de9b9c", + "outputs": {}, + "owner": null, + "project": "f1fe61dcda2f4618a14c10dc7abc214d", + "start_time": 1425550000.0, + "status": "SUCCEEDED", + "status_reason": "Action completed successfully.", + "target": "f0de9b9c-6d48-4a46-af21-2ca8607777fe", + "timeout": 3600, + "updated_at": "", + "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" + } + ] +} +` + +const GetResponse = ` +{ + "action": { + "action": "NODE_DELETE", + "cause": "RPC Request", + "created_at": "2015-11-04T05:21:41Z", + "data": {}, + "depended_by": ["ef67fe80-6547-40f2-ba1b-83e950aa38df"], + "depends_on": ["ef67fe80-6547-40f2-ba1b-83e950aa38df"], + "end_time": 1425550000.0, + "id": "edce3528-864f-41fb-8759-f4707925cc09", + "inputs": {}, + "interval": -1, + "name": "node_delete_f0de9b9c", + "outputs": {}, + "owner": null, + "project": "f1fe61dcda2f4618a14c10dc7abc214d", + "start_time": 1425550000.0, + "status": "SUCCEEDED", + "status_reason": "Action completed successfully.", + "target": "f0de9b9c-6d48-4a46-af21-2ca8607777fe", + "timeout": 3600, + "updated_at": "2016-11-04T05:21:41Z", + "user": "8bcd2cdca7684c02afc9e4f2fc0f0c79" + } +} +` + +var ExpectedAction1 = actions.Action{ + Action: "NODE_DELETE", + 
Cause: "RPC Request", + CreatedAt: time.Date(2015, 11, 4, 5, 21, 41, 0, time.UTC), + Data: map[string]interface{}{}, + DependedBy: []string{"ef67fe80-6547-40f2-ba1b-83e950aa38df"}, + DependsOn: []string{"ef67fe80-6547-40f2-ba1b-83e950aa38df"}, + EndTime: 1425550000.0, + ID: "edce3528-864f-41fb-8759-f4707925cc09", + Inputs: make(map[string]interface{}), + Interval: -1, + Name: "node_delete_f0de9b9c", + Outputs: make(map[string]interface{}), + Owner: "", + Project: "f1fe61dcda2f4618a14c10dc7abc214d", + StartTime: 1425550000.0, + Status: "SUCCEEDED", + StatusReason: "Action completed successfully.", + Target: "f0de9b9c-6d48-4a46-af21-2ca8607777fe", + Timeout: 3600, + UpdatedAt: time.Date(2016, 11, 4, 5, 21, 41, 0, time.UTC), + User: "8bcd2cdca7684c02afc9e4f2fc0f0c79", +} + +var ExpectedAction2 = actions.Action{ + Action: "NODE_DELETE", + Cause: "RPC Request", + CreatedAt: time.Time{}, + Data: map[string]interface{}{}, + DependedBy: []string{"ef67fe80-6547-40f2-ba1b-83e950aa38df"}, + DependsOn: []string{"ef67fe80-6547-40f2-ba1b-83e950aa38df"}, + EndTime: 1425550000.0, + ID: "edce3528-864f-41fb-8759-f4707925cc09", + Inputs: make(map[string]interface{}), + Interval: -1, + Name: "node_delete_f0de9b9c", + Outputs: make(map[string]interface{}), + Owner: "", + Project: "f1fe61dcda2f4618a14c10dc7abc214d", + StartTime: 1425550000.0, + Status: "SUCCEEDED", + StatusReason: "Action completed successfully.", + Target: "f0de9b9c-6d48-4a46-af21-2ca8607777fe", + Timeout: 3600, + UpdatedAt: time.Time{}, + User: "8bcd2cdca7684c02afc9e4f2fc0f0c79", +} + +var ExpectedActions = []actions.Action{ExpectedAction1, ExpectedAction2} + +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListResponse) + }) +} + +func HandleGetSuccessfully(t *testing.T, id string) { + th.Mux.HandleFunc("/v1/actions/"+id, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/requests_test.go new file mode 100644 index 000000000000..6a4e5f1592a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/testing/requests_test.go @@ -0,0 +1,44 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/actions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListActions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListSuccessfully(t) + + pageCount := 0 + err := actions.List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + pageCount++ + actual, err := actions.ExtractActions(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedActions, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + + if pageCount != 1 { + t.Errorf("Expected 1 page, got %d", pageCount) + } +} + +func TestGetAction(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetSuccessfully(t, ExpectedAction1.ID) + + actual, err := actions.Get(fake.ServiceClient(), ExpectedAction1.ID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedAction1, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/urls.go new file mode 100644 index 000000000000..3288cc805e90 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/actions/urls.go @@ -0,0 +1,22 @@ +package actions + +import "github.com/gophercloud/gophercloud" + +var apiVersion = "v1" +var apiName = "actions" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/doc.go new file mode 100644 index 000000000000..f731fb5376fc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/doc.go @@ -0,0 +1,227 @@ +/* +Package clusters provides information and interaction with the clusters through +the OpenStack Clustering service. + +Example to Create a Cluster + + createOpts := clusters.CreateOpts{ + Name: "test-cluster", + DesiredCapacity: 1, + ProfileID: "b7b870ee-d3c5-4a93-b9d7-846c53b2c2da", + } + + cluster, err := clusters.Create(serviceClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Get a Cluster + + clusterName := "cluster123" + cluster, err := clusters.Get(serviceClient, clusterName).Extract() + if err != nil { + panic(err) + } + fmt.Printf("%+v\n", cluster) + +Example to List Clusters + + listOpts := clusters.ListOpts{ + Name: "testcluster", + } + + allPages, err := clusters.ListDetail(serviceClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allClusters, err := clusters.ExtractClusters(allPages) + if err != nil { + panic(err) + } + + for _, cluster := range allClusters { + fmt.Printf("%+v\n", cluster) + } + +Example to Update a Cluster + + updateOpts := clusters.UpdateOpts{ + Name: "testcluster", + ProfileID: "b7b870ee-d3c5-4a93-b9d7-846c53b2c2da", + } + + clusterID := "7d85f602-a948-4a30-afd4-e84f47471c15" + cluster, err := clusters.Update(serviceClient, clusterName, opts).Extract() + if err != nil { + panic(err) + } + fmt.Printf("%+v\n", cluster) + +Example to Delete a Cluster + + clusterID := "dc6d336e3fc4c0a951b5698cd1236ee" + err := clusters.Delete(serviceClient, clusterID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Resize a Cluster + + number := 1 + maxSize := 5 + minSize := 1 + minStep := 1 + strict := true + + resizeOpts := clusters.ResizeOpts{ + AdjustmentType: clusters.ChangeInCapacityAdjustment, + Number: number, + MaxSize: &maxSize, + MinSize: &minSize, + MinStep: &minStep, + Strict: &strict, + } + + actionID, err := clusters.Resize(client, clusterName, resizeOpts).Extract() + if err != nil { + t.Fatalf("Unable to resize cluster: %v", err) + } + fmt.Println("Resize actionID", actionID) + +Example to ScaleIn a Cluster + + count := 2 + 
scaleInOpts := clusters.ScaleInOpts{ + Count: &count, + } + clusterID: "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + + action, err := clusters.ScaleIn(computeClient, clusterID, scaleInOpts).Extract() + if err != nil { + panic(err) + } + +Example to ScaleOut a cluster + + scaleOutOpts := clusters.ScaleOutOpts{ + Count: 2, + } + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + + actionID, err := clusters.ScaleOut(computeClient, clusterID, scaleOutOpts).Extract() + if err != nil { + panic(err) + } + +Example to List Policies for a Cluster + + clusterID := "7d85f602-a948-4a30-afd4-e84f47471c15" + allPages, err := clusters.ListPolicies(serviceClient, clusterID, nil).AllPages() + if err != nil { + panic(err) + } + + allClusterPolicies, err := clusters.ExtractClusterPolicies(allPages) + if err != nil { + panic(err) + } + + for _, clusterPolicy := range allClusterPolicies { + fmt.Printf("%+v\n", clusterPolicy) + } + +Example to Get a Cluster Policy + + clusterID := "7d85f602-a948-4a30-afd4-e84f47471c15" + profileID := "714fe676-a08f-4196-b7af-61d52eeded15" + clusterPolicy, err := clusterpolicies.Get(serviceCLient, clusterID, profileID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", clusterPolicy) + +Example to Attach a Policy to a Cluster + + enabled := true + attachPolicyOpts := clusters.AttachPolicyOpts{ + PolicyID: "policy-123", + Enabled: &enabled, + } + + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + actionID, err := clusters.AttachPolicy(serviceClient, clusterID, attachPolicyOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Println("Attach Policy actionID", actionID) + +Example to Detach a Policy to Cluster + + detachpolicyOpts := clusters.DetachPolicyOpts{ + PolicyID: "policy-123", + } + + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + actionID, err := clusters.DetachPolicy(serviceClient, clusterID, detachpolicyOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Println("Update Policy actionID", actionID) + +Example to Update a Policy to a Cluster + + enabled := true + updatePolicyOpts := clusters.UpdatePolicyOpts{ + PolicyID: "policy-123", + Enabled: &enabled, + } + + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + actionID, err := clusters.UpdatePolicy(serviceClient, clusterID, updatePolicyOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Println("Attach Policy actionID", actionID) + +Example to Recover a Cluster + + check := true + checkCapacity := true + recoverOpts := clusters.RecoverOpts{ + Operation: clusters.RebuildRecovery, + Check: &check, + CheckCapacity: &checkCapacity, + } + + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + actionID, err := clusters.Recover(computeClient, clusterID, recoverOpts).Extract() + if err != nil { + panic(err) + } + fmt.Println("action=", actionID) + +Example to Check a Cluster + + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + action, err := clusters.Check(computeClient, clusterID).Extract() + if err != nil { + panic(err) + } + +Example to Complete Life Cycle + + clusterID := "b7b870e3-d3c5-4a93-b9d7-846c53b2c2da" + lifecycleOpts := clusters.CompleteLifecycleOpts{LifecycleActionTokenID: "2b827124-69e1-496e-9484-33ca769fe4df"} + + action, err := clusters.CompleteLifecycle(computeClient, clusterID, lifecycleOpts).Extract() + if err != nil { + panic(err) + } + +*/ +package clusters diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/requests.go new 
file mode 100644 index 000000000000..2c97e202fce3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/requests.go @@ -0,0 +1,525 @@ +package clusters + +import ( + "fmt" + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// AdjustmentType represents valid values for resizing a cluster. +type AdjustmentType string + +const ( + ExactCapacityAdjustment AdjustmentType = "EXACT_CAPACITY" + ChangeInCapacityAdjustment AdjustmentType = "CHANGE_IN_CAPACITY" + ChangeInPercentageAdjustment AdjustmentType = "CHANGE_IN_PERCENTAGE" +) + +// RecoveryAction represents valid values for recovering a cluster. +type RecoveryAction string + +const ( + RebootRecovery RecoveryAction = "REBOOT" + RebuildRecovery RecoveryAction = "REBUILD" + RecreateRecovery RecoveryAction = "RECREATE" +) + +// CreateOptsBuilder allows extensions to add additional parameters +// to the Create request. +type CreateOptsBuilder interface { + ToClusterCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a cluster. +type CreateOpts struct { + Name string `json:"name" required:"true"` + DesiredCapacity int `json:"desired_capacity"` + ProfileID string `json:"profile_id" required:"true"` + MinSize *int `json:"min_size,omitempty"` + Timeout int `json:"timeout,omitempty"` + MaxSize int `json:"max_size,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + Config map[string]interface{} `json:"config,omitempty"` +} + +// ToClusterCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToClusterCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "cluster") +} + +// Create requests the creation of a new cluster. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToClusterCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Get retrieves details of a single cluster. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + var result *http.Response + result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request. +type ListOptsBuilder interface { + ToClusterListQuery() (string, error) +} + +// ListOpts represents options to list clusters. +type ListOpts struct { + Limit int `q:"limit"` + Marker string `q:"marker"` + Sort string `q:"sort"` + GlobalProject *bool `q:"global_project"` + Name string `q:"name,omitempty"` + Status string `q:"status,omitempty"` +} + +// ToClusterListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToClusterListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List instructs OpenStack to provide a list of clusters. 
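Tying CreateOpts and the result types together, a hedged sketch of creating a one-node cluster from an existing profile and reading it back by ID; the cluster name is a placeholder, and Extract on the result types is used as in the package doc above:

package clusteringexamples

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters"
)

// createAndFetchCluster creates a minimal cluster and immediately re-reads it.
func createAndFetchCluster(client *gophercloud.ServiceClient, profileID string) (*clusters.Cluster, error) {
	createOpts := clusters.CreateOpts{
		Name:            "example-cluster", // placeholder name
		DesiredCapacity: 1,
		ProfileID:       profileID,
	}

	created, err := clusters.Create(client, createOpts).Extract()
	if err != nil {
		return nil, err
	}

	// Cluster creation is asynchronous; Get re-reads the cluster, whose Status
	// and StatusReason reflect the progress of the underlying create action.
	return clusters.Get(client, created.ID).Extract()
}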
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToClusterListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ClusterPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToClusterUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options to update a cluster. +type UpdateOpts struct { + Config string `json:"config,omitempty"` + Name string `json:"name,omitempty"` + ProfileID string `json:"profile_id,omitempty"` + Timeout *int `json:"timeout,omitempty"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + ProfileOnly *bool `json:"profile_only,omitempty"` +} + +// ToClusterUpdateMap assembles a request body based on the contents of +// UpdateOpts. +func (opts UpdateOpts) ToClusterUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "cluster") + if err != nil { + return nil, err + } + return b, nil +} + +// Update will update an existing cluster. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToClusterUpdateMap() + if err != nil { + r.Err = err + return r + } + + var result *http.Response + result, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} + +// Delete deletes the specified cluster ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + var result *http.Response + result, r.Err = client.Delete(deleteURL(client, id), nil) + if r.Err == nil { + r.Header = result.Header + } + return +} + +// ResizeOptsBuilder allows extensions to add additional parameters to the +// resize request. +type ResizeOptsBuilder interface { + ToClusterResizeMap() (map[string]interface{}, error) +} + +// ResizeOpts represents options for resizing a cluster. +type ResizeOpts struct { + AdjustmentType AdjustmentType `json:"adjustment_type,omitempty"` + Number interface{} `json:"number,omitempty"` + MinSize *int `json:"min_size,omitempty"` + MaxSize *int `json:"max_size,omitempty"` + MinStep *int `json:"min_step,omitempty"` + Strict *bool `json:"strict,omitempty"` +} + +// ToClusterResizeMap constructs a request body from ResizeOpts. +func (opts ResizeOpts) ToClusterResizeMap() (map[string]interface{}, error) { + if opts.AdjustmentType != "" && opts.Number == nil { + return nil, fmt.Errorf("Number field MUST NOT be empty when AdjustmentType field used") + } + + switch opts.Number.(type) { + case nil, int, int32, int64: + // Valid type. 
Always allow + case float32, float64: + if opts.AdjustmentType != ChangeInPercentageAdjustment { + return nil, fmt.Errorf("Only ChangeInPercentageAdjustment allows float value for Number field") + } + default: + return nil, fmt.Errorf("Number field must be either int, float, or omitted") + } + + return gophercloud.BuildRequestBody(opts, "resize") +} + +func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterResizeMap() + if err != nil { + r.Err = err + return + } + + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// ScaleInOptsBuilder allows extensions to add additional parameters to the +// ScaleIn request. +type ScaleInOptsBuilder interface { + ToClusterScaleInMap() (map[string]interface{}, error) +} + +// ScaleInOpts represents options used to scale-in a cluster. +type ScaleInOpts struct { + Count *int `json:"count,omitempty"` +} + +// ToClusterScaleInMap constructs a request body from ScaleInOpts. +func (opts ScaleInOpts) ToClusterScaleInMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "scale_in") +} + +// ScaleIn will reduce the capacity of a cluster. +func ScaleIn(client *gophercloud.ServiceClient, id string, opts ScaleInOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterScaleInMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// ScaleOutOptsBuilder allows extensions to add additional parameters to the +// ScaleOut request. +type ScaleOutOptsBuilder interface { + ToClusterScaleOutMap() (map[string]interface{}, error) +} + +// ScaleOutOpts represents options used to scale-out a cluster. +type ScaleOutOpts struct { + Count int `json:"count,omitempty"` +} + +// ToClusterScaleOutMap constructs a request body from ScaleOutOpts. +func (opts ScaleOutOpts) ToClusterScaleOutMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "scale_out") +} + +// ScaleOut will increase the capacity of a cluster. +func ScaleOut(client *gophercloud.ServiceClient, id string, opts ScaleOutOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterScaleOutMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// AttachPolicyOptsBuilder allows extensions to add additional parameters to the +// AttachPolicy request. +type AttachPolicyOptsBuilder interface { + ToClusterAttachPolicyMap() (map[string]interface{}, error) +} + +// PolicyOpts params +type AttachPolicyOpts struct { + PolicyID string `json:"policy_id" required:"true"` + Enabled *bool `json:"enabled,omitempty"` +} + +// ToClusterAttachPolicyMap constructs a request body from AttachPolicyOpts. +func (opts AttachPolicyOpts) ToClusterAttachPolicyMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "policy_attach") +} + +// Attach Policy will attach a policy to a cluster. 
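As the validation in ToClusterResizeMap above implies, a floating-point Number is accepted only together with CHANGE_IN_PERCENTAGE; every other adjustment type requires an integer. A hedged sketch of growing a cluster by 20% while capping its size, where Extract on the returned ActionResult yields the action ID as in the package doc:

package clusteringexamples

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters"
)

// growClusterByTwentyPercent issues a CHANGE_IN_PERCENTAGE resize and returns
// the ID of the asynchronous action that carries it out.
func growClusterByTwentyPercent(client *gophercloud.ServiceClient, clusterID string) (string, error) {
	maxSize := 10
	strict := true

	resizeOpts := clusters.ResizeOpts{
		// A float Number is valid only for CHANGE_IN_PERCENTAGE; see the
		// type switch in ToClusterResizeMap.
		AdjustmentType: clusters.ChangeInPercentageAdjustment,
		Number:         20.0,
		MaxSize:        &maxSize,
		Strict:         &strict,
	}

	return clusters.Resize(client, clusterID, resizeOpts).Extract()
}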
+func AttachPolicy(client *gophercloud.ServiceClient, id string, opts AttachPolicyOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterAttachPolicyMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// UpdatePolicyOptsBuilder allows extensions to add additional parameters to the +// UpdatePolicy request. +type UpdatePolicyOptsBuilder interface { + ToClusterUpdatePolicyMap() (map[string]interface{}, error) +} + +// UpdatePolicyOpts represents options used to update a cluster policy. +type UpdatePolicyOpts struct { + PolicyID string `json:"policy_id" required:"true"` + Enabled *bool `json:"enabled,omitempty" required:"true"` +} + +// ToClusterUpdatePolicyMap constructs a request body from UpdatePolicyOpts. +func (opts UpdatePolicyOpts) ToClusterUpdatePolicyMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "policy_update") +} + +// UpdatePolicy will update a cluster's policy. +func UpdatePolicy(client *gophercloud.ServiceClient, id string, opts UpdatePolicyOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterUpdatePolicyMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// DetachPolicyOptsBuilder allows extensions to add additional parameters to the +// DetachPolicy request. +type DetachPolicyOptsBuilder interface { + ToClusterDetachPolicyMap() (map[string]interface{}, error) +} + +// DetachPolicyOpts represents options used to detach a policy from a cluster. +type DetachPolicyOpts struct { + PolicyID string `json:"policy_id" required:"true"` +} + +// ToClusterDetachPolicyMap constructs a request body from DetachPolicyOpts. +func (opts DetachPolicyOpts) ToClusterDetachPolicyMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "policy_detach") +} + +// DetachPolicy will detach a policy from a cluster. +func DetachPolicy(client *gophercloud.ServiceClient, id string, opts DetachPolicyOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterDetachPolicyMap() + if err != nil { + r.Err = err + return + } + + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// ListPolicyOptsBuilder allows extensions to add additional parameters to the +// ListPolicies request. +type ListPoliciesOptsBuilder interface { + ToClusterPoliciesListQuery() (string, error) +} + +// ListPoliciesOpts represents options to list a cluster's policies. +type ListPoliciesOpts struct { + Enabled *bool `q:"enabled"` + Name string `q:"policy_name"` + Type string `q:"policy_type"` + Sort string `q:"sort"` +} + +// ToClusterPoliciesListQuery formats a ListOpts into a query string. +func (opts ListPoliciesOpts) ToClusterPoliciesListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListPolicies instructs OpenStack to provide a list of policies for a cluster. 
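Policy attach, update, and detach are asynchronous as well: each returns an ActionResult whose Extract yields an action ID, which can then be watched through the actions package. A hedged sketch of attaching a policy and polling until the action reports SUCCEEDED; the terminal failure statuses checked here are assumptions, and real code would use a bounded backoff:

package clusteringexamples

import (
	"fmt"
	"time"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/clustering/v1/actions"
	"github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters"
)

// attachPolicyAndWait attaches a policy to a cluster and polls the resulting
// action until it succeeds, fails, or the retry budget runs out.
func attachPolicyAndWait(client *gophercloud.ServiceClient, clusterID, policyID string) error {
	enabled := true
	actionID, err := clusters.AttachPolicy(client, clusterID, clusters.AttachPolicyOpts{
		PolicyID: policyID,
		Enabled:  &enabled,
	}).Extract()
	if err != nil {
		return err
	}

	for i := 0; i < 30; i++ {
		action, err := actions.Get(client, actionID).Extract()
		if err != nil {
			return err
		}
		switch action.Status {
		case "SUCCEEDED":
			return nil
		case "FAILED", "CANCELLED": // assumed terminal failure statuses
			return fmt.Errorf("policy_attach %s ended with status %s", actionID, action.Status)
		}
		time.Sleep(2 * time.Second)
	}
	return fmt.Errorf("policy_attach %s did not finish in time", actionID)
}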
+func ListPolicies(client *gophercloud.ServiceClient, clusterID string, opts ListPoliciesOptsBuilder) pagination.Pager { + url := listPoliciesURL(client, clusterID) + if opts != nil { + query, err := opts.ToClusterPoliciesListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ClusterPolicyPage{pagination.SinglePageBase(r)} + }) +} + +// GetPolicy retrieves details of a cluster policy. +func GetPolicy(client *gophercloud.ServiceClient, clusterID string, policyID string) (r GetPolicyResult) { + _, r.Err = client.Get(getPolicyURL(client, clusterID, policyID), &r.Body, nil) + return +} + +// RecoverOptsBuilder allows extensions to add additional parameters to the +// Recover request. +type RecoverOptsBuilder interface { + ToClusterRecoverMap() (map[string]interface{}, error) +} + +// RecoverOpts represents options used to recover a cluster. +type RecoverOpts struct { + Operation RecoveryAction `json:"operation,omitempty"` + Check *bool `json:"check,omitempty"` + CheckCapacity *bool `json:"check_capacity,omitempty"` +} + +// ToClusterRecovermap constructs a request body from RecoverOpts. +func (opts RecoverOpts) ToClusterRecoverMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "recover") +} + +// Recover implements cluster recover request. +func Recover(client *gophercloud.ServiceClient, id string, opts RecoverOptsBuilder) (r ActionResult) { + b, err := opts.ToClusterRecoverMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Check will perform a health check on a cluster. +func Check(client *gophercloud.ServiceClient, id string) (r ActionResult) { + b := map[string]interface{}{ + "check": map[string]interface{}{}, + } + + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// ToClusterCompleteLifecycleMap constructs a request body from CompleteLifecycleOpts. 
+func (opts CompleteLifecycleOpts) ToClusterCompleteLifecycleMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "complete_lifecycle") +} + +type CompleteLifecycleOpts struct { + LifecycleActionTokenID string `json:"lifecycle_action_token" required:"true"` +} + +func CompleteLifecycle(client *gophercloud.ServiceClient, id string, opts CompleteLifecycleOpts) (r ActionResult) { + b, err := opts.ToClusterCompleteLifecycleMap() + if err != nil { + r.Err = err + return + } + + var result *http.Response + result, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + if r.Err == nil { + r.Header = result.Header + } + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/results.go new file mode 100644 index 000000000000..b9d7aa2be8c0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/results.go @@ -0,0 +1,201 @@ +package clusters + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Cluster represents an OpenStack Clustering cluster. +type Cluster struct { + Config map[string]interface{} `json:"config"` + CreatedAt time.Time `json:"-"` + Data map[string]interface{} `json:"data"` + Dependents map[string]interface{} `json:"dependents"` + DesiredCapacity int `json:"desired_capacity"` + Domain string `json:"domain"` + ID string `json:"id"` + InitAt time.Time `json:"-"` + MaxSize int `json:"max_size"` + Metadata map[string]interface{} `json:"metadata"` + MinSize int `json:"min_size"` + Name string `json:"name"` + Nodes []string `json:"nodes"` + Policies []string `json:"policies"` + ProfileID string `json:"profile_id"` + ProfileName string `json:"profile_name"` + Project string `json:"project"` + Status string `json:"status"` + StatusReason string `json:"status_reason"` + Timeout int `json:"timeout"` + UpdatedAt time.Time `json:"-"` + User string `json:"user"` +} + +func (r *Cluster) UnmarshalJSON(b []byte) error { + type tmp Cluster + var s struct { + tmp + CreatedAt string `json:"created_at"` + InitAt string `json:"init_at"` + UpdatedAt string `json:"updated_at"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Cluster(s.tmp) + + if s.CreatedAt != "" { + r.CreatedAt, err = time.Parse(gophercloud.RFC3339Milli, s.CreatedAt) + if err != nil { + return err + } + } + + if s.InitAt != "" { + r.InitAt, err = time.Parse(gophercloud.RFC3339Milli, s.InitAt) + if err != nil { + return err + } + } + + if s.UpdatedAt != "" { + r.UpdatedAt, err = time.Parse(gophercloud.RFC3339Milli, s.UpdatedAt) + if err != nil { + return err + } + } + + return nil +} + +// ClusterPolicy represents and OpenStack Clustering cluster policy. +type ClusterPolicy struct { + ClusterID string `json:"cluster_id"` + ClusterName string `json:"cluster_name"` + Enabled bool `json:"enabled"` + ID string `json:"id"` + PolicyID string `json:"policy_id"` + PolicyName string `json:"policy_name"` + PolicyType string `json:"policy_type"` +} + +// Action represents an OpenStack Clustering action. +type Action struct { + Action string `json:"action"` +} + +// commonResult is the response of a base result. +type commonResult struct { + gophercloud.Result +} + +// Extract interprets any commonResult-based result as a Cluster. 
+func (r commonResult) Extract() (*Cluster, error) {
+	var s struct {
+		Cluster *Cluster `json:"cluster"`
+	}
+
+	err := r.ExtractInto(&s)
+	return s.Cluster, err
+}
+
+// CreateResult is the response of a Create operation. Call its Extract method
+// to interpret it as a Cluster.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult is the response of a Get operation. Call its Extract method to
+// interpret it as a Cluster.
+type GetResult struct {
+	commonResult
+}
+
+// UpdateResult is the response of an Update operation. Call its Extract method
+// to interpret it as a Cluster.
+type UpdateResult struct {
+	commonResult
+}
+
+// GetPolicyResult is the response of a Get operation. Call its Extract method
+// to interpret it as a ClusterPolicy.
+type GetPolicyResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets a GetPolicyResult as a ClusterPolicy.
+func (r GetPolicyResult) Extract() (*ClusterPolicy, error) {
+	var s struct {
+		ClusterPolicy *ClusterPolicy `json:"cluster_policy"`
+	}
+	err := r.ExtractInto(&s)
+	return s.ClusterPolicy, err
+}
+
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// ClusterPage contains a single page of all clusters from a List call.
+type ClusterPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of Clusters contains any results.
+func (page ClusterPage) IsEmpty() (bool, error) {
+	clusters, err := ExtractClusters(page)
+	return len(clusters) == 0, err
+}
+
+// ClusterPolicyPage contains a single page of all policies from a List call.
+type ClusterPolicyPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a page of ClusterPolicies contains any
+// results.
+func (page ClusterPolicyPage) IsEmpty() (bool, error) {
+	clusterPolicies, err := ExtractClusterPolicies(page)
+	return len(clusterPolicies) == 0, err
+}
+
+// ActionResult is the response of Senlin actions. Call its Extract method to
+// obtain the Action ID of the action.
+type ActionResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any Action result as an Action.
+func (r ActionResult) Extract() (string, error) {
+	var s struct {
+		Action string `json:"action"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Action, err
+}
+
+// ExtractClusters returns a slice of Clusters from the List operation.
+func ExtractClusters(r pagination.Page) ([]Cluster, error) {
+	var s struct {
+		Clusters []Cluster `json:"clusters"`
+	}
+	err := (r.(ClusterPage)).ExtractInto(&s)
+	return s.Clusters, err
+}
+
+// ExtractClusterPolicies returns a slice of ClusterPolicies from the
+// ListClusterPolicies operation.
+func ExtractClusterPolicies(r pagination.Page) ([]ClusterPolicy, error) { + var s struct { + ClusterPolicies []ClusterPolicy `json:"cluster_policies"` + } + err := (r.(ClusterPolicyPage)).ExtractInto(&s) + return s.ClusterPolicies, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/doc.go new file mode 100644 index 000000000000..022157789316 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_clusters_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/fixtures.go new file mode 100644 index 000000000000..26c84821c7db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/fixtures.go @@ -0,0 +1,624 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ClusterResponse = ` +{ + "cluster": { + "config": {}, + "created_at": "2015-02-10T14:26:14Z", + "data": {}, + "dependents": {}, + "desired_capacity": 3, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": "2015-02-10T15:26:14Z", + "max_size": 20, + "metadata": {}, + "min_size": 1, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "mystack", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": "2015-02-10T16:26:14Z", + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +var ExpectedCluster = clusters.Cluster{ + Config: map[string]interface{}{}, + CreatedAt: time.Date(2015, 2, 10, 14, 26, 14, 0, time.UTC), + Data: map[string]interface{}{}, + Dependents: map[string]interface{}{}, + DesiredCapacity: 3, + Domain: "", + ID: "7d85f602-a948-4a30-afd4-e84f47471c15", + InitAt: time.Date(2015, 2, 10, 15, 26, 14, 0, time.UTC), + MaxSize: 20, + Metadata: map[string]interface{}{}, + MinSize: 1, + Name: "cluster1", + Nodes: []string{ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac", + }, + Policies: []string{}, + ProfileID: "edc63d0a-2ca4-48fa-9854-27926da76a4a", + ProfileName: "mystack", + Project: "6e18cc2bdbeb48a5b3cad2dc499f6804", + Status: "ACTIVE", + StatusReason: "Cluster scale-in succeeded", + Timeout: 3600, + UpdatedAt: time.Date(2015, 2, 10, 16, 26, 14, 0, time.UTC), + User: "5e5bf8027826429c96af157f68dc9072", +} + +const ClusterResponse_EmptyTime = ` +{ + "cluster": { + "config": {}, + "created_at": null, + "data": {}, + "dependents": {}, + "desired_capacity": 3, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": null, + "max_size": 20, + "metadata": {}, + "min_size": 1, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + 
"profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "mystack", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": null, + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +var ExpectedCluster_EmptyTime = clusters.Cluster{ + Config: map[string]interface{}{}, + Data: map[string]interface{}{}, + Dependents: map[string]interface{}{}, + DesiredCapacity: 3, + Domain: "", + ID: "7d85f602-a948-4a30-afd4-e84f47471c15", + MaxSize: 20, + Metadata: map[string]interface{}{}, + MinSize: 1, + Name: "cluster1", + Nodes: []string{ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac", + }, + Policies: []string{}, + ProfileID: "edc63d0a-2ca4-48fa-9854-27926da76a4a", + ProfileName: "mystack", + Project: "6e18cc2bdbeb48a5b3cad2dc499f6804", + Status: "ACTIVE", + StatusReason: "Cluster scale-in succeeded", + Timeout: 3600, + User: "5e5bf8027826429c96af157f68dc9072", +} + +const ClusterResponse_Metadata = ` +{ + "cluster": { + "config": {}, + "created_at": "2015-02-10T14:26:14Z", + "data": {}, + "dependents": {}, + "desired_capacity": 3, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": "2015-02-10T15:26:14Z", + "max_size": 20, + "metadata": { + "test": { + "nil_interface": null, + "bool_value": false, + "string_value": "test_string", + "float_value": 123.3 + }, + "foo": "bar" + }, + "min_size": 1, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "mystack", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": "2015-02-10T16:26:14Z", + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +var ExpectedCluster_Metadata = clusters.Cluster{ + Config: map[string]interface{}{}, + CreatedAt: time.Date(2015, 2, 10, 14, 26, 14, 0, time.UTC), + Data: map[string]interface{}{}, + Dependents: map[string]interface{}{}, + DesiredCapacity: 3, + Domain: "", + ID: "7d85f602-a948-4a30-afd4-e84f47471c15", + InitAt: time.Date(2015, 2, 10, 15, 26, 14, 0, time.UTC), + MaxSize: 20, + MinSize: 1, + Metadata: map[string]interface{}{ + "foo": "bar", + "test": map[string]interface{}{ + "nil_interface": interface{}(nil), + "float_value": float64(123.3), + "string_value": "test_string", + "bool_value": false, + }, + }, + Name: "cluster1", + Nodes: []string{ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac", + }, + Policies: []string{}, + ProfileID: "edc63d0a-2ca4-48fa-9854-27926da76a4a", + ProfileName: "mystack", + Project: "6e18cc2bdbeb48a5b3cad2dc499f6804", + Status: "ACTIVE", + StatusReason: "Cluster scale-in succeeded", + Timeout: 3600, + UpdatedAt: time.Date(2015, 2, 10, 16, 26, 14, 0, time.UTC), + User: "5e5bf8027826429c96af157f68dc9072", +} + +const ListResponse = ` +{ + "clusters": [ + { + "config": {}, + "created_at": "2015-02-10T14:26:14Z", + "data": {}, + "dependents": {}, + "desired_capacity": 3, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": "2015-02-10T15:26:14Z", + "max_size": 20, + "min_size": 1, + "metadata": {}, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + 
"ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "mystack", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": "2015-02-10T16:26:14Z", + "user": "5e5bf8027826429c96af157f68dc9072" + }, + { + "config": {}, + "created_at": null, + "data": {}, + "dependents": {}, + "desired_capacity": 3, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": null, + "max_size": 20, + "metadata": {}, + "min_size": 1, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "mystack", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": null, + "user": "5e5bf8027826429c96af157f68dc9072" + } + ] +}` + +var ExpectedClusters = []clusters.Cluster{ExpectedCluster, ExpectedCluster_EmptyTime} + +const UpdateResponse = ` +{ + "cluster": { + "config": {}, + "created_at": "2015-02-10T14:26:14Z", + "data": {}, + "dependents": {}, + "desired_capacity": 4, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": "2015-02-10T15:26:14Z", + "max_size": -1, + "metadata": {}, + "min_size": 0, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "profile1", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": "2015-02-10T16:26:14Z", + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +const UpdateResponse_EmptyTime = ` +{ + "cluster": { + "config": {}, + "created_at": null, + "data": {}, + "dependents": {}, + "desired_capacity": 3, + "domain": null, + "id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "init_at": null, + "max_size": 20, + "metadata": {}, + "min_size": 1, + "name": "cluster1", + "nodes": [ + "b07c57c8-7ab2-47bf-bdf8-e894c0c601b9", + "ecc23d3e-bb68-48f8-8260-c9cf6bcb6e61", + "da1e9c87-e584-4626-a120-022da5062dac" + ], + "policies": [], + "profile_id": "edc63d0a-2ca4-48fa-9854-27926da76a4a", + "profile_name": "mystack", + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "status": "ACTIVE", + "status_reason": "Cluster scale-in succeeded", + "timeout": 3600, + "updated_at": null, + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +const ActionResponse = ` +{ + "action": "2a0ff107-e789-4660-a122-3816c43af703" +}` + +const ExpectedActionID = "2a0ff107-e789-4660-a122-3816c43af703" + +const ListPoliciesResult = `{ + "cluster_policies": [ + { + "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "cluster_name": "cluster4", + "enabled": true, + "id": "06be3a1f-b238-4a96-a737-ceec5714087e", + "policy_id": "714fe676-a08f-4196-b7af-61d52eeded15", + "policy_name": "dp01", + "policy_type": "senlin.policy.deletion-1.0" + } + ] +}` + +var ExpectedClusterPolicy = clusters.ClusterPolicy{ + ClusterID: "7d85f602-a948-4a30-afd4-e84f47471c15", + ClusterName: "cluster4", + Enabled: true, + ID: "06be3a1f-b238-4a96-a737-ceec5714087e", + PolicyID: 
"714fe676-a08f-4196-b7af-61d52eeded15", + PolicyName: "dp01", + PolicyType: "senlin.policy.deletion-1.0", +} + +var ExpectedListPolicies = []clusters.ClusterPolicy{ExpectedClusterPolicy} + +const GetPolicyResponse = ` +{ + "cluster_policy": { + "cluster_id": "7d85f602-a948-4a30-afd4-e84f47471c15", + "cluster_name": "cluster4", + "enabled": true, + "id": "06be3a1f-b238-4a96-a737-ceec5714087e", + "policy_id": "714fe676-a08f-4196-b7af-61d52eeded15", + "policy_name": "dp01", + "policy_type": "senlin.policy.deletion-1.0" + } +}` + +func HandleCreateClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-ID", "req-781e9bdc-4163-46eb-91c9-786c53188bbb") + w.Header().Add("Location", "http://senlin.cloud.blizzard.net:8778/v1/actions/625628cd-f877-44be-bde0-fec79f84e13d") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterResponse) + }) +} + +func HandleCreateClusterEmptyTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterResponse_EmptyTime) + }) +} + +func HandleCreateClusterMetadataSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterResponse_Metadata) + }) +} + +func HandleGetClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterResponse) + }) +} + +func HandleGetClusterEmptyTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterResponse_EmptyTime) + }) +} + +func HandleListClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ListResponse) + }) +} + +func HandleUpdateClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/"+ExpectedCluster.ID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterResponse) + }) +} + +func HandleUpdateClusterEmptyTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/"+ExpectedCluster_EmptyTime.ID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, 
"X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, UpdateResponse_EmptyTime) + }) +} + +func HandleDeleteClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/6dc6d336e3fc4c0a951b5698cd1236ee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +func HandleResizeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleScaleInSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/edce3528-864f-41fb-8759-f4707925cc09/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleScaleOutSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/edce3528-864f-41fb-8759-f4707925cc09/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleListPoliciesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15/policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ListPoliciesResult) + }) +} + +func HandleGetPolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15/policies/714fe676-a08f-4196-b7af-61d52eeded15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, GetPolicyResponse) + }) +} + +func HandleRecoverSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/edce3528-864f-41fb-8759-f4707925cc09/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleAttachPolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleDetachPolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", 
fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleUpdatePolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/7d85f602-a948-4a30-afd4-e84f47471c15/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleCheckSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/edce3528-864f-41fb-8759-f4707925cc09/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ActionResponse) + }) +} + +func HandleLifecycleSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/edce3528-864f-41fb-8759-f4707925cc09/actions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-ID", "req-781e9bdc-4163-46eb-91c9-786c53188bbb") + w.Header().Add("Location", "http://senlin.cloud.blizzard.net:8778/v1/actions/2a0ff107-e789-4660-a122-3816c43af703") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprint(w, ActionResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/requests_test.go new file mode 100644 index 000000000000..ba4f05a3a36c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/testing/requests_test.go @@ -0,0 +1,428 @@ +package testing + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateClusterSuccessfully(t) + + minSize := 1 + opts := clusters.CreateOpts{ + Name: "cluster1", + DesiredCapacity: 3, + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + MinSize: &minSize, + MaxSize: 20, + Timeout: 3600, + Metadata: map[string]interface{}{}, + Config: map[string]interface{}{}, + } + + res := clusters.Create(fake.ServiceClient(), opts) + th.AssertNoErr(t, res.Err) + + location := res.Header.Get("Location") + th.AssertEquals(t, "http://senlin.cloud.blizzard.net:8778/v1/actions/625628cd-f877-44be-bde0-fec79f84e13d", location) + + locationFields := strings.Split(location, "actions/") + actionID := locationFields[1] + th.AssertEquals(t, "625628cd-f877-44be-bde0-fec79f84e13d", actionID) + + actual, err := res.Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster, *actual) +} + +func TestCreateClusterEmptyTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateClusterEmptyTimeSuccessfully(t) + + minSize := 1 + opts := clusters.CreateOpts{ + Name: "cluster1", + DesiredCapacity: 3, + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + MinSize: &minSize, + MaxSize: 20, + Timeout: 3600, + Metadata: map[string]interface{}{}, + Config: map[string]interface{}{}, 
+ } + + actual, err := clusters.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster_EmptyTime, *actual) +} + +func TestCreateClusterMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateClusterMetadataSuccessfully(t) + + minSize := 1 + opts := clusters.CreateOpts{ + Name: "cluster1", + DesiredCapacity: 3, + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + MinSize: &minSize, + MaxSize: 20, + Timeout: 3600, + Metadata: map[string]interface{}{ + "foo": "bar", + "test": map[string]interface{}{ + "nil_interface": interface{}(nil), + "float_value": float64(123.3), + "string_value": "test_string", + "bool_value": false, + }, + }, + Config: map[string]interface{}{}, + } + + actual, err := clusters.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster_Metadata, *actual) +} + +func TestGetCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetClusterSuccessfully(t) + + actual, err := clusters.Get(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster, *actual) +} + +func TestGetClusterEmptyTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetClusterEmptyTimeSuccessfully(t) + + actual, err := clusters.Get(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster_EmptyTime, *actual) +} + +func TestListClusters(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListClusterSuccessfully(t) + + count := 0 + + clusters.List(fake.ServiceClient(), clusters.ListOpts{GlobalProject: new(bool)}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := clusters.ExtractClusters(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedClusters, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestUpdateCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateClusterSuccessfully(t) + + updateOpts := clusters.UpdateOpts{ + Name: "cluster1", + ProfileID: "edc63d0a-2ca4-48fa-9854-27926da76a4a", + } + + actual, err := clusters.Update(fake.ServiceClient(), ExpectedCluster.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster, *actual) +} + +func TestUpdateClusterEmptyTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateClusterEmptyTimeSuccessfully(t) + + updateOpts := clusters.UpdateOpts{ + Name: "cluster1", + ProfileID: "edc63d0a-2ca4-48fa-9854-27926da76a4a", + } + + actual, err := clusters.Update(fake.ServiceClient(), ExpectedCluster_EmptyTime.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCluster_EmptyTime, *actual) +} + +func TestDeleteCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteClusterSuccessfully(t) + + err := clusters.Delete(fake.ServiceClient(), "6dc6d336e3fc4c0a951b5698cd1236ee").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestResizeCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleResizeSuccessfully(t) + + maxSize := 5 + minSize := 1 + number := -2 + strict := true + opts := clusters.ResizeOpts{ + AdjustmentType: "CHANGE_IN_CAPACITY", + MaxSize: &maxSize, + MinSize: &minSize, + Number: number, + Strict: &strict, + } + + actionID, err := 
clusters.Resize(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15", opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +// Test case for Number field having a float value +func TestResizeClusterNumberFloat(t *testing.T) { + maxSize := 5 + minSize := 1 + number := 100.0 + strict := true + opts := clusters.ResizeOpts{ + AdjustmentType: "CHANGE_IN_PERCENTAGE", + MaxSize: &maxSize, + MinSize: &minSize, + Number: number, + Strict: &strict, + } + + _, err := opts.ToClusterResizeMap() + th.AssertNoErr(t, err) +} + +// Test case for missing Number field. +func TestResizeClusterMissingNumber(t *testing.T) { + maxSize := 5 + minSize := 1 + strict := true + opts := clusters.ResizeOpts{ + MaxSize: &maxSize, + MinSize: &minSize, + Strict: &strict, + } + + _, err := opts.ToClusterResizeMap() + th.AssertNoErr(t, err) +} + +// Test case for missing Number field which is required when AdjustmentType is specified +func TestResizeClusterInvalidParamsMissingNumber(t *testing.T) { + maxSize := 5 + minSize := 1 + strict := true + opts := clusters.ResizeOpts{ + AdjustmentType: "CHANGE_IN_CAPACITY", + MaxSize: &maxSize, + MinSize: &minSize, + Strict: &strict, + } + + _, err := opts.ToClusterResizeMap() + isValid := err == nil + th.AssertEquals(t, false, isValid) +} + +// Test case for float Number field which is only valid for CHANGE_IN_PERCENTAGE. +func TestResizeClusterInvalidParamsNumberFloat(t *testing.T) { + maxSize := 5 + minSize := 1 + number := 100.0 + strict := true + opts := clusters.ResizeOpts{ + AdjustmentType: "CHANGE_IN_CAPACITY", + MaxSize: &maxSize, + MinSize: &minSize, + Number: number, + Strict: &strict, + } + + _, err := opts.ToClusterResizeMap() + isValid := err == nil + th.AssertEquals(t, false, isValid) +} + +func TestClusterScaleIn(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleScaleInSuccessfully(t) + + count := 5 + scaleOpts := clusters.ScaleInOpts{ + Count: &count, + } + actionID, err := clusters.ScaleIn(fake.ServiceClient(), "edce3528-864f-41fb-8759-f4707925cc09", scaleOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +func TestListClusterPolicies(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListPoliciesSuccessfully(t) + + pageCount := 0 + err := clusters.ListPolicies(fake.ServiceClient(), ExpectedClusterPolicy.ClusterID, clusters.ListPoliciesOpts{Name: "Test"}).EachPage(func(page pagination.Page) (bool, error) { + pageCount++ + actual, err := clusters.ExtractClusterPolicies(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedListPolicies, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, pageCount, 1) +} + +func TestGetClusterPolicies(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetPolicySuccessfully(t) + + actual, err := clusters.GetPolicy(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15", "714fe676-a08f-4196-b7af-61d52eeded15").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedClusterPolicy, *actual) +} + +func TestClusterRecover(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleRecoverSuccessfully(t) + + recoverOpts := clusters.RecoverOpts{ + Operation: clusters.RebuildRecovery, + Check: new(bool), + CheckCapacity: new(bool), + } + actionID, err := clusters.Recover(fake.ServiceClient(), "edce3528-864f-41fb-8759-f4707925cc09", recoverOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + 
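Every action test above follows the same pattern the requests.go file implements: build an Opts struct, POST it to the cluster's actions endpoint, and extract the asynchronous Senlin action ID from the response body. As a minimal usage sketch outside the test harness, in the same style as the package doc examples (it assumes `serviceClient` is an already-authenticated clustering v1 *gophercloud.ServiceClient and reuses the cluster ID from the tests; neither is defined by this patch):

	opts := clusters.ScaleOutOpts{Count: 2}

	actionID, err := clusters.ScaleOut(serviceClient, "edce3528-864f-41fb-8759-f4707925cc09", opts).Extract()
	if err != nil {
		panic(err)
	}

	// actionID identifies the Senlin action that carries out the scale-out.
	fmt.Printf("scale_out action: %s\n", actionID)
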
+func TestAttachPolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleAttachPolicySuccessfully(t) + + enabled := true + opts := clusters.AttachPolicyOpts{ + PolicyID: "policy1", + Enabled: &enabled, + } + actionID, err := clusters.AttachPolicy(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15", opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +func TestDetachPolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDetachPolicySuccessfully(t) + + opts := clusters.DetachPolicyOpts{ + PolicyID: "policy1", + } + actionID, err := clusters.DetachPolicy(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15", opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +func TestUpdatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdatePolicySuccessfully(t) + + enabled := true + opts := clusters.UpdatePolicyOpts{ + PolicyID: "policy1", + Enabled: &enabled, + } + actionID, err := clusters.UpdatePolicy(fake.ServiceClient(), "7d85f602-a948-4a30-afd4-e84f47471c15", opts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +func TestClusterScaleOut(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleScaleOutSuccessfully(t) + + scaleOutOpts := clusters.ScaleOutOpts{ + Count: 5, + } + actionID, err := clusters.ScaleOut(fake.ServiceClient(), "edce3528-864f-41fb-8759-f4707925cc09", scaleOutOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +func TestClusterCheck(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCheckSuccessfully(t) + + actionID, err := clusters.Check(fake.ServiceClient(), "edce3528-864f-41fb-8759-f4707925cc09").Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedActionID, actionID) +} + +func TestLifecycle(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleLifecycleSuccessfully(t) + + opts := clusters.CompleteLifecycleOpts{ + LifecycleActionTokenID: "976528c6-dcf6-4d8d-9f4c-588f4e675f29", + } + + res := clusters.CompleteLifecycle(fake.ServiceClient(), "edce3528-864f-41fb-8759-f4707925cc09", opts) + location := res.Header.Get("Location") + th.AssertEquals(t, "http://senlin.cloud.blizzard.net:8778/v1/actions/2a0ff107-e789-4660-a122-3816c43af703", location) + + actionID, err := res.Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, "2a0ff107-e789-4660-a122-3816c43af703", actionID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/urls.go new file mode 100644 index 000000000000..a4e0db0c1f0f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/clusters/urls.go @@ -0,0 +1,46 @@ +package clusters + +import "github.com/gophercloud/gophercloud" + +var apiVersion = "v1" +var apiName = "clusters" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id) +} + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id, "actions") +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + 
return idURL(client, id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func listPoliciesURL(client *gophercloud.ServiceClient, clusterID string) string { + return client.ServiceURL(apiVersion, apiName, clusterID, "policies") +} + +func getPolicyURL(client *gophercloud.ServiceClient, clusterID string, policyID string) string { + return client.ServiceURL(apiVersion, apiName, clusterID, "policies", policyID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/doc.go new file mode 100644 index 000000000000..3257b8d12dc1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/doc.go @@ -0,0 +1,74 @@ +/* +Package nodes provides information and interaction with the nodes through +the OpenStack Clustering service. + +Example to Create a Node + + opts := nodes.CreateOpts{ + ClusterID: "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + Metadata: map[string]interface{}{}, + Name: "node-e395be1e-002", + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + Role: "", + } + + node, err := nodes.Create(serviceClient, opts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("node", node) + +Example to List Nodes + + listOpts := nodes.ListOpts{ + Name: "testnode", + } + + allPages, err := nodes.List(serviceClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allNodes, err := nodes.ExtractNodes(allPages) + if err != nil { + panic(err) + } + + for _, node := range allNodes { + fmt.Printf("%+v\n", node) + } + +Example to Update a Node + + opts := nodes.UpdateOpts{ + Name: "new-node-name", + } + + nodeID := "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1" + node, err := nodes.Update(serviceClient, nodeID, opts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", node) + +Example to Delete a Node + + nodeID := "6dc6d336e3fc4c0a951b5698cd1236ee" + err := nodes.Delete(serviceClient, nodeID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Get a Node + + nodeID := "node123" + node, err := nodes.Get(serviceClient, nodeID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", node) +*/ +package nodes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/requests.go new file mode 100644 index 000000000000..98bbd6d73061 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/requests.go @@ -0,0 +1,146 @@ +package nodes + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToNodeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a Node. +type CreateOpts struct { + Role string `json:"role,omitempty"` + ProfileID string `json:"profile_id" required:"true"` + ClusterID string `json:"cluster_id,omitempty"` + Name string `json:"name" required:"true"` + Metadata map[string]interface{} `json:"metadata,omitempty"` +} + +// ToNodeCreateMap constructs a request body from CreateOpts. 
+func (opts CreateOpts) ToNodeCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "node")
+}
+
+// Create requests the creation of a new node.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToNodeCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	var result *http.Response
+	result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToNodeUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options used to update a Node.
+type UpdateOpts struct {
+	Name      string                 `json:"name,omitempty"`
+	ProfileID string                 `json:"profile_id,omitempty"`
+	Role      string                 `json:"role,omitempty"`
+	Metadata  map[string]interface{} `json:"metadata,omitempty"`
+}
+
+// ToNodeUpdateMap constructs a request body from UpdateOpts.
+func (opts UpdateOpts) ToNodeUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "node")
+}
+
+// Update requests the update of a node.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToNodeUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	var result *http.Response
+	result, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToNodeListQuery() (string, error)
+}
+
+// ListOpts represents options used to list nodes.
+type ListOpts struct {
+	Limit         int    `q:"limit"`
+	Marker        string `q:"marker"`
+	Sort          string `q:"sort"`
+	GlobalProject *bool  `q:"global_project"`
+	ClusterID     string `q:"cluster_id"`
+	Name          string `q:"name"`
+	Status        string `q:"status"`
+}
+
+// ToNodeListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToNodeListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List instructs OpenStack to provide a list of nodes.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToNodeListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return NodePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Delete deletes the specified node.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	var result *http.Response
+	result, r.Err = client.Delete(deleteURL(client, id), nil)
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+	return
+}
+
+// Get makes a request against Senlin to get the details of a node.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	var result *http.Response
+	result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/results.go
new file mode 100644
index 000000000000..57987c27f365
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/results.go
@@ -0,0 +1,129 @@
+package nodes
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Node represents an OpenStack clustering node.
+type Node struct {
+	ClusterID    string                 `json:"cluster_id"`
+	CreatedAt    time.Time              `json:"-"`
+	Data         map[string]interface{} `json:"data"`
+	Dependents   map[string]interface{} `json:"dependents"`
+	Domain       string                 `json:"domain"`
+	ID           string                 `json:"id"`
+	Index        int                    `json:"index"`
+	InitAt       time.Time              `json:"-"`
+	Metadata     map[string]interface{} `json:"metadata"`
+	Name         string                 `json:"name"`
+	PhysicalID   string                 `json:"physical_id"`
+	ProfileID    string                 `json:"profile_id"`
+	ProfileName  string                 `json:"profile_name"`
+	Project      string                 `json:"project"`
+	Role         string                 `json:"role"`
+	Status       string                 `json:"status"`
+	StatusReason string                 `json:"status_reason"`
+	UpdatedAt    time.Time              `json:"-"`
+	User         string                 `json:"user"`
+}
+
+func (r *Node) UnmarshalJSON(b []byte) error {
+	type tmp Node
+	var s struct {
+		tmp
+		CreatedAt string `json:"created_at"`
+		InitAt    string `json:"init_at"`
+		UpdatedAt string `json:"updated_at"`
+	}
+
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Node(s.tmp)
+
+	if s.CreatedAt != "" {
+		r.CreatedAt, err = time.Parse(time.RFC3339, s.CreatedAt)
+		if err != nil {
+			return err
+		}
+	}
+
+	if s.InitAt != "" {
+		r.InitAt, err = time.Parse(time.RFC3339, s.InitAt)
+		if err != nil {
+			return err
+		}
+	}
+
+	if s.UpdatedAt != "" {
+		r.UpdatedAt, err = time.Parse(time.RFC3339, s.UpdatedAt)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// commonResult is the response of a base result.
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any commonResult-based result as a Node.
+func (r commonResult) Extract() (*Node, error) {
+	var s struct {
+		Node *Node `json:"node"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Node, err
+}
+
+// CreateResult is the result of a Create operation. Call its Extract
+// method to interpret it as a Node.
+type CreateResult struct {
+	commonResult
+}
+
+// GetResult is the result of a Get operation. Call its Extract method to
+// interpret it as a Node.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult is the result from a Delete operation. Call its ExtractErr
+// method to determine if the call succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// UpdateResult is the result of an Update operation. Call its Extract method
+// to interpret it as a Node.
+type UpdateResult struct {
+	commonResult
+}
+
+// NodePage contains a single page of all nodes from a List call.
+type NodePage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a NodePage contains any results. +func (page NodePage) IsEmpty() (bool, error) { + nodes, err := ExtractNodes(page) + return len(nodes) == 0, err +} + +// ExtractNodes returns a slice of Nodes from the List operation. +func ExtractNodes(r pagination.Page) ([]Node, error) { + var s struct { + Nodes []Node `json:"nodes"` + } + err := (r.(NodePage)).ExtractInto(&s) + return s.Nodes, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/doc.go new file mode 100644 index 000000000000..cb76666d2dd2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_nodes_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/fixtures.go new file mode 100644 index 000000000000..14ac6dbd1cf4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/fixtures.go @@ -0,0 +1,316 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const CreateResponse = `{ + "node": { + "cluster_id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + "created_at": "2016-05-13T07:02:20Z", + "data": { + "internal_ports": [ + { + "network_id": "847e4f65-1ff1-42b1-9e74-74e6a109ad11", + "security_group_ids": ["8db277ab-1d98-4148-ba72-724721789427"], + "fixed_ips": [ + { + "subnet_id": "863b20c0-c011-4650-85c2-ad531f4570a4", + "ip_address": "10.63.177.162" + } + ], + "id": "43aa53d7-a70b-4f40-812f-4feecb687018", + "remove": true + } + ], + "placement": { + "zone": "nova" + } + }, + "dependents": {}, + "domain": "1235be1e-8d8e-43bb-bd6c-943eccf76a6d", + "id": "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + "index": 2, + "init_at": "2016-05-13T08:02:04Z", + "metadata": { + "test": { + "nil_interface": null, + "bool_value": false, + "string_value": "test_string", + "float_value": 123.3 + }, + "foo": "bar" + }, + "name": "node-e395be1e-002", + "physical_id": "66a81d68-bf48-4af5-897b-a3bfef7279a8", + "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + "profile_name": "pcirros", + "project": "eee0b7c083e84501bdd50fb269d2a10e", + "role": "", + "status": "ACTIVE", + "status_reason": "Creation succeeded", + "updated_at": null, + "user": "ab79b9647d074e46ac223a8fa297b846" + } +}` + +var ExpectedCreate = nodes.Node{ + ClusterID: "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + CreatedAt: time.Date(2016, 5, 13, 7, 2, 20, 0, time.UTC), + Data: map[string]interface{}{ + "internal_ports": []map[string]interface{}{ + { + "network_id": "847e4f65-1ff1-42b1-9e74-74e6a109ad11", + "security_group_ids": []interface{}{ + "8db277ab-1d98-4148-ba72-724721789427", + }, + "fixed_ips": []interface{}{ + map[string]interface{}{ + "subnet_id": "863b20c0-c011-4650-85c2-ad531f4570a4", + "ip_address": "10.63.177.162", + }, + }, + "id": "43aa53d7-a70b-4f40-812f-4feecb687018", + "remove": true, + }, + }, + "placement": map[string]interface{}{ + "zone": "nova", + }, + }, + Dependents: map[string]interface{}{}, + Domain: "1235be1e-8d8e-43bb-bd6c-943eccf76a6d", + ID: 
"82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + Index: 2, + InitAt: time.Date(2016, 5, 13, 8, 2, 4, 0, time.UTC), + Metadata: map[string]interface{}{ + "foo": "bar", + "test": map[string]interface{}{ + "nil_interface": interface{}(nil), + "float_value": float64(123.3), + "string_value": "test_string", + "bool_value": false, + }, + }, + Name: "node-e395be1e-002", + PhysicalID: "66a81d68-bf48-4af5-897b-a3bfef7279a8", + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + ProfileName: "pcirros", + Project: "eee0b7c083e84501bdd50fb269d2a10e", + Role: "", + Status: "ACTIVE", + StatusReason: "Creation succeeded", + User: "ab79b9647d074e46ac223a8fa297b846", +} + +const ListResponse = ` +{ + "nodes": [ + { + "cluster_id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + "created_at": "2016-05-13T07:02:20Z", + "data": {}, + "dependents": {}, + "domain": null, + "id": "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + "index": 2, + "init_at": "2016-05-13T08:02:04Z", + "metadata": {}, + "name": "node-e395be1e-002", + "physical_id": "66a81d68-bf48-4af5-897b-a3bfef7279a8", + "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + "profile_name": "pcirros", + "project": "eee0b7c083e84501bdd50fb269d2a10e", + "role": "", + "status": "ACTIVE", + "status_reason": "Creation succeeded", + "updated_at": "2016-05-13T09:02:04Z", + "user": "ab79b9647d074e46ac223a8fa297b846" } + ] +}` + +var ExpectedList1 = nodes.Node{ + ClusterID: "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + CreatedAt: time.Date(2016, 5, 13, 7, 2, 20, 0, time.UTC), + Data: map[string]interface{}{}, + Dependents: map[string]interface{}{}, + Domain: "", + ID: "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + Index: 2, + InitAt: time.Date(2016, 5, 13, 8, 2, 4, 0, time.UTC), + Metadata: map[string]interface{}{}, + Name: "node-e395be1e-002", + PhysicalID: "66a81d68-bf48-4af5-897b-a3bfef7279a8", + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + ProfileName: "pcirros", + Project: "eee0b7c083e84501bdd50fb269d2a10e", + Role: "", + Status: "ACTIVE", + StatusReason: "Creation succeeded", + UpdatedAt: time.Date(2016, 5, 13, 9, 2, 4, 0, time.UTC), + User: "ab79b9647d074e46ac223a8fa297b846", +} + +var ExpectedList = []nodes.Node{ExpectedList1} + +const GetResponse = ` +{ + "node": { + "cluster_id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + "created_at": "2016-05-13T07:02:20Z", + "data": {}, + "dependents": {}, + "domain": null, + "id": "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + "index": 2, + "init_at": "2016-05-13T07:02:04Z", + "metadata": {"foo": "bar"}, + "name": "node-e395be1e-002", + "physical_id": "66a81d68-bf48-4af5-897b-a3bfef7279a8", + "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + "profile_name": "pcirros", + "project": "eee0b7c083e84501bdd50fb269d2a10e", + "role": "", + "status": "ACTIVE", + "status_reason": "Creation succeeded", + "updated_at": "2016-05-13T07:02:20Z", + "user": "ab79b9647d074e46ac223a8fa297b846" + } +}` + +var ExpectedGet = nodes.Node{ + ClusterID: "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + CreatedAt: time.Date(2016, 5, 13, 7, 2, 20, 0, time.UTC), + Data: map[string]interface{}{}, + Dependents: map[string]interface{}{}, + Domain: "", + ID: "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + Index: 2, + InitAt: time.Date(2016, 5, 13, 7, 2, 4, 0, time.UTC), + Metadata: map[string]interface{}{"foo": "bar"}, + Name: "node-e395be1e-002", + PhysicalID: "66a81d68-bf48-4af5-897b-a3bfef7279a8", + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + ProfileName: "pcirros", + Project: "eee0b7c083e84501bdd50fb269d2a10e", + Role: "", + Status: "ACTIVE", + StatusReason: 
"Creation succeeded", + UpdatedAt: time.Date(2016, 5, 13, 7, 2, 20, 0, time.UTC), + User: "ab79b9647d074e46ac223a8fa297b846", +} + +const UpdateResponse = ` +{ + "node": { + "cluster_id": "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + "created_at": "2016-05-13T07:02:20Z", + "data": {}, + "dependents": {}, + "domain": null, + "id": "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + "index": 2, + "init_at": "2016-05-13T08:02:04Z", + "metadata": {"foo":"bar"}, + "name": "node-e395be1e-002", + "physical_id": "66a81d68-bf48-4af5-897b-a3bfef7279a8", + "profile_id": "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + "profile_name": "pcirros", + "project": "eee0b7c083e84501bdd50fb269d2a10e", + "role": "", + "status": "ACTIVE", + "status_reason": "Creation succeeded", + "updated_at": "2016-05-13T09:02:04Z", + "user": "ab79b9647d074e46ac223a8fa297b846" + } +}` + +var ExpectedUpdate = nodes.Node{ + ClusterID: "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + CreatedAt: time.Date(2016, 5, 13, 7, 2, 20, 0, time.UTC), + Data: map[string]interface{}{}, + Dependents: map[string]interface{}{}, + Domain: "", + ID: "82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", + Index: 2, + InitAt: time.Date(2016, 5, 13, 8, 2, 4, 0, time.UTC), + Metadata: map[string]interface{}{"foo": "bar"}, + Name: "node-e395be1e-002", + PhysicalID: "66a81d68-bf48-4af5-897b-a3bfef7279a8", + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + ProfileName: "pcirros", + Project: "eee0b7c083e84501bdd50fb269d2a10e", + Role: "", + Status: "ACTIVE", + StatusReason: "Creation succeeded", + UpdatedAt: time.Date(2016, 5, 13, 9, 2, 4, 0, time.UTC), + User: "ab79b9647d074e46ac223a8fa297b846", +} + +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-ID", "req-3791a089-9d46-4671-a3f9-55e95e55d2b4") + w.Header().Add("Location", "http://senlin.cloud.blizzard.net:8778/v1/actions/ffd94dd8-6266-4887-9a8c-5b78b72136da") + + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, CreateResponse) + }) +} + +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/nodes", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ListResponse) + }) +} + +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/nodes/6dc6d336e3fc4c0a951b5698cd1236ee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/nodes/573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, GetResponse) + }) +} + +func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/nodes/82fe28e0-9fcb-42ca-a2fa-6eb7dddd75a1", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + 
w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, UpdateResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/requests_test.go new file mode 100644 index 000000000000..1a8e72adc18c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/testing/requests_test.go @@ -0,0 +1,105 @@ +package testing + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateSuccessfully(t) + + createOpts := nodes.CreateOpts{ + ClusterID: "e395be1e-8d8e-43bb-bd6c-943eccf76a6d", + Metadata: map[string]interface{}{ + "foo": "bar", + "test": map[string]interface{}{ + "nil_interface": interface{}(nil), + "float_value": float64(123.3), + "string_value": "test_string", + "bool_value": false, + }, + }, + Name: "node-e395be1e-002", + ProfileID: "d8a48377-f6a3-4af4-bbbb-6e8bcaa0cbc0", + Role: "", + } + + res := nodes.Create(fake.ServiceClient(), createOpts) + th.AssertNoErr(t, res.Err) + + requestID := res.Header.Get("X-Openstack-Request-Id") + th.AssertEquals(t, "req-3791a089-9d46-4671-a3f9-55e95e55d2b4", requestID) + + location := res.Header.Get("Location") + th.AssertEquals(t, "http://senlin.cloud.blizzard.net:8778/v1/actions/ffd94dd8-6266-4887-9a8c-5b78b72136da", location) + + locationFields := strings.Split(location, "actions/") + actionID := locationFields[1] + th.AssertEquals(t, "ffd94dd8-6266-4887-9a8c-5b78b72136da", actionID) + + actual, err := res.Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCreate, *actual) +} + +func TestListNodes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListSuccessfully(t) + + count := 0 + err := nodes.List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + actual, err := nodes.ExtractNodes(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedList, actual) + count++ + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestDeleteNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteSuccessfully(t) + + deleteResult := nodes.Delete(fake.ServiceClient(), "6dc6d336e3fc4c0a951b5698cd1236ee") + th.AssertNoErr(t, deleteResult.ExtractErr()) +} + +func TestGetNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetSuccessfully(t) + + actual, err := nodes.Get(fake.ServiceClient(), "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedGet, *actual) +} + +func TestUpdateNode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateSuccessfully(t) + + nodeOpts := nodes.UpdateOpts{ + Name: "node-e395be1e-002", + } + actual, err := nodes.Update(fake.ServiceClient(), ExpectedUpdate.ID, nodeOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedUpdate, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/urls.go new file mode 100644 index 000000000000..5479145d0b1a --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/nodes/urls.go
@@ -0,0 +1,34 @@
+package nodes
+
+import "github.com/gophercloud/gophercloud"
+
+var apiVersion = "v1"
+var apiName = "nodes"
+
+func commonURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL(apiVersion, apiName)
+}
+
+func createURL(client *gophercloud.ServiceClient) string {
+	return commonURL(client)
+}
+
+func listURL(client *gophercloud.ServiceClient) string {
+	return commonURL(client)
+}
+
+func idURL(client *gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL(apiVersion, apiName, id)
+}
+
+func deleteURL(client *gophercloud.ServiceClient, id string) string {
+	return idURL(client, id)
+}
+
+func getURL(client *gophercloud.ServiceClient, id string) string {
+	return idURL(client, id)
+}
+
+func updateURL(client *gophercloud.ServiceClient, id string) string {
+	return idURL(client, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/doc.go
new file mode 100644
index 000000000000..ffe75f6e1394
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/doc.go
@@ -0,0 +1,96 @@
+/*
+Package policies provides information and interaction with the policies through
+the OpenStack Clustering service.
+
+Example to List Policies
+
+	listOpts := policies.ListOpts{
+		Limit: 2,
+	}
+
+	allPages, err := policies.List(clusteringClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := policies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, policy := range allPolicies {
+		fmt.Printf("%+v\n", policy)
+	}
+
+
+Example to Create a Policy
+
+	opts := policies.CreateOpts{
+		Name: "new_policy",
+		Spec: policies.Spec{
+			Description: "new policy description",
+			Properties: map[string]interface{}{
+				"hooks": map[string]interface{}{
+					"type": "zaqar",
+					"params": map[string]interface{}{
+						"queue": "my_zaqar_queue",
+					},
+					"timeout": 10,
+				},
+			},
+			Type: "senlin.policy.deletion",
+			Version: "1.1",
+		},
+	}
+
+	createdPolicy, err := policies.Create(client, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get a Policy
+
+	policyName := "get_policy"
+	policyDetail, err := policies.Get(clusteringClient, policyName).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", policyDetail)
+
+Example to Update a Policy
+
+	opts := policies.UpdateOpts{
+		Name: "update_policy",
+	}
+
+	updatePolicy, err := policies.Update(client, policyID, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Validate a Policy
+
+	opts := policies.ValidateOpts{
+		Spec: policies.Spec{
+			Description: "new policy description",
+			Properties: map[string]interface{}{
+				"hooks": map[string]interface{}{
+					"type": "zaqar",
+					"params": map[string]interface{}{
+						"queue": "my_zaqar_queue",
+					},
+					"timeout": 10,
+				},
+			},
+			Type: "senlin.policy.deletion",
+			Version: "1.1",
+		},
+	}
+
+	validatePolicy, err := policies.Validate(client, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package policies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/requests.go
new file mode 100644
index 000000000000..f3e2ad249b90
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/requests.go
@@ -0,0 +1,193 @@
+package
policies + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts represents options used to list policies. +type ListOpts struct { + // Limit limits the number of Policies to return. + Limit int `q:"limit"` + + // Marker and Limit control paging. Marker instructs List where to start + // listing from. + Marker string `q:"marker"` + + // Sorts the response by one or more attribute and optional sort direction + // combinations. + Sort string `q:"sort"` + + // GlobalProject indicates whether to include resources for all projects or + // resources for the current project. + GlobalProject *bool `q:"global_project"` + + // Name to filter the response by the specified name property of the object. + Name string `q:"name"` + + // Filter the response by the specified type property of the object. + Type string `q:"type"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List instructs OpenStack to retrieve a list of policies. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := policyListURL(client) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + p := PolicyPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a policy. +type CreateOpts struct { + Name string `json:"name"` + Spec Spec `json:"spec"` +} + +// ToPolicyCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + return map[string]interface{}{"policy": b}, nil +} + +// Create makes a request against the API to create a policy +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPolicyCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(policyCreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + if r.Err == nil { + r.Header = result.Header + } + return +} + +// Delete makes a request against the API to delete a policy. +func Delete(client *gophercloud.ServiceClient, policyID string) (r DeleteResult) { + var result *http.Response + result, r.Err = client.Delete(policyDeleteURL(client, policyID), &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + if r.Err == nil { + r.Header = result.Header + } + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options to update a policy. 
+type UpdateOpts struct {
+	Name string `json:"name,omitempty"`
+}
+
+// ToPolicyUpdateMap constructs a request body from UpdateOpts.
+func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "policy")
+}
+
+// Update updates a specified policy.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToPolicyUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	var result *http.Response
+	result, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+
+	return
+}
+
+// ValidateOptsBuilder allows extensions to add additional parameters to the
+// Validate request.
+type ValidateOptsBuilder interface {
+	ToPolicyValidateMap() (map[string]interface{}, error)
+}
+
+// ValidateOpts represents options used to validate a policy.
+type ValidateOpts struct {
+	Spec Spec `json:"spec"`
+}
+
+// ToPolicyValidateMap constructs a request body from ValidateOpts.
+func (opts ValidateOpts) ToPolicyValidateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "policy")
+}
+
+// Validate validates the specified policy spec.
+func Validate(client *gophercloud.ServiceClient, opts ValidateOptsBuilder) (r ValidateResult) {
+	b, err := opts.ToPolicyValidateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	var result *http.Response
+	result, r.Err = client.Post(validateURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+	return
+}
+
+// Get makes a request against the API to get details for a policy.
+func Get(client *gophercloud.ServiceClient, policyID string) (r GetResult) {
+	url := policyGetURL(client, policyID)
+
+	_, r.Err = client.Get(url, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/results.go
new file mode 100644
index 000000000000..46c88da9b8b7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/results.go
@@ -0,0 +1,172 @@
+package policies
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Policy represents a clustering policy in the OpenStack cloud.
+type Policy struct {
+	CreatedAt time.Time `json:"-"`
+	Data map[string]interface{} `json:"data"`
+	Domain string `json:"domain"`
+	ID string `json:"id"`
+	Name string `json:"name"`
+	Project string `json:"project"`
+	Spec Spec `json:"spec"`
+	Type string `json:"type"`
+	UpdatedAt time.Time `json:"-"`
+	User string `json:"user"`
+}
+
+func (r *Policy) UnmarshalJSON(b []byte) error {
+	type tmp Policy
+	var s struct {
+		tmp
+		CreatedAt string `json:"created_at,omitempty"`
+		UpdatedAt string `json:"updated_at,omitempty"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Policy(s.tmp)
+
+	if s.CreatedAt != "" {
+		r.CreatedAt, err = time.Parse(gophercloud.RFC3339MilliNoZ, s.CreatedAt)
+		if err != nil {
+			r.CreatedAt, err = time.Parse(time.RFC3339, s.CreatedAt)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if s.UpdatedAt != "" {
+		r.UpdatedAt, err = time.Parse(gophercloud.RFC3339MilliNoZ, s.UpdatedAt)
+		if err != nil {
+			r.UpdatedAt, err = time.Parse(time.RFC3339, s.UpdatedAt)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	return nil
+}
+
+// Spec represents an OpenStack clustering policy spec.
+type Spec struct {
+	Description string `json:"description"`
+	Properties map[string]interface{} `json:"properties"`
+	Type string `json:"type"`
+	Version string `json:"-"`
+}
+
+func (r *Spec) UnmarshalJSON(b []byte) error {
+	type tmp Spec
+	var s struct {
+		tmp
+		Version interface{} `json:"version"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Spec(s.tmp)
+
+	switch t := s.Version.(type) {
+	case float64:
+		if t == 1 {
+			r.Version = fmt.Sprintf("%.1f", t)
+		} else {
+			r.Version = strconv.FormatFloat(t, 'f', -1, 64)
+		}
+	case string:
+		r.Version = t
+	}
+
+	return nil
+}
+
+// policyResult is the base result of a Policy operation.
+type policyResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any policyResult-based result as a Policy.
+func (r policyResult) Extract() (*Policy, error) {
+	var s struct {
+		Policy *Policy `json:"policy"`
+	}
+	err := r.ExtractInto(&s)
+
+	return s.Policy, err
+}
+
+// CreateResult is the result of a Create operation. Call its Extract
+// method to interpret it as a Policy.
+type CreateResult struct {
+	policyResult
+}
+
+// GetResult is the result of a Get operation. Call its Extract method to
+// interpret it as a Policy.
+type GetResult struct {
+	policyResult
+}
+
+// UpdateResult is the result of an Update operation. Call its Extract
+// method to interpret it as a Policy.
+type UpdateResult struct {
+	policyResult
+}
+
+// ValidateResult is the result of a Validate operation. Call its Extract
+// method to interpret it as a Policy.
+type ValidateResult struct {
+	policyResult
+}
+
+// DeleteResult is the result of a Delete operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// PolicyPage contains a single page of all policies from a List call.
+type PolicyPage struct {
+	pagination.MarkerPageBase
+}
+
+// IsEmpty determines if a PolicyPage contains any results.
+func (page PolicyPage) IsEmpty() (bool, error) {
+	policies, err := ExtractPolicies(page)
+	return len(policies) == 0, err
+}
+
+// LastMarker returns the last policy ID in a ListResult.
+func (r PolicyPage) LastMarker() (string, error) { + policies, err := ExtractPolicies(r) + if err != nil { + return "", err + } + if len(policies) == 0 { + return "", nil + } + return policies[len(policies)-1].ID, nil +} + +// ExtractPolicies returns a slice of Policies from the List operation. +func ExtractPolicies(r pagination.Page) ([]Policy, error) { + var s struct { + Policies []Policy `json:"policies"` + } + err := (r.(PolicyPage)).ExtractInto(&s) + return s.Policies, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/doc.go new file mode 100644 index 000000000000..61bc1c3b6d4e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_policies_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/fixtures.go new file mode 100644 index 000000000000..cf03210184cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/fixtures.go @@ -0,0 +1,513 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policies" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const PolicyListBody1 = ` +{ + "policies": [ + { + "created_at": "2018-04-02T21:43:30.000000", + "data": {}, + "domain": null, + "id": "PolicyListBodyID1", + "name": "delpol", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "criteria": "OLDEST_FIRST", + "destroy_after_deletion": true, + "grace_period": 60, + "reduce_desired_capacity": false + }, + "type": "senlin.policy.deletion", + "version": 1 + }, + "type": "senlin.policy.deletion-1.0", + "updated_at": "2018-04-02T00:19:12Z", + "user": "fe43e41739154b72818565e0d2580819" + } + ] +} +` + +const PolicyListBody2 = ` +{ + "policies": [ + { + "created_at": "2018-04-02T22:29:36.000000", + "data": {}, + "domain": null, + "id": "PolicyListBodyID2", + "name": "delpol2", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "criteria": "OLDEST_FIRST", + "destroy_after_deletion": true, + "grace_period": 60, + "reduce_desired_capacity": false + }, + "type": "senlin.policy.deletion", + "version": "1.0" + }, + "type": "senlin.policy.deletion-1.0", + "updated_at": "2018-04-02T23:15:11.000000", + "user": "fe43e41739154b72818565e0d2580819" + } + ] +} +` + +const PolicyCreateBody = ` +{ + "policy": { + "created_at": "2018-04-04T00:18:36Z", + "data": {}, + "domain": null, + "id": "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + "name": "delpol4", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "hooks": { + "params": { + "queue": "zaqar_queue_name" + }, + "timeout": 180, + "type": "zaqar" + } + }, + "type": "senlin.policy.deletion", + "version": 1.1 + }, + "type": "senlin.policy.deletion-1.1", + "updated_at": null, + "user": "fe43e41739154b72818565e0d2580819" + } +} +` + +const 
PolicyGetBody = ` +{ + "policy": { + "created_at": "2018-04-02T21:43:30.000000", + "data": {}, + "domain": null, + "id": "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + "name": "delpol", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "criteria": "OLDEST_FIRST", + "destroy_after_deletion": true, + "grace_period": 60, + "reduce_desired_capacity": false + }, + "type": "senlin.policy.deletion", + "version": 1 + }, + "type": "senlin.policy.deletion-1.0", + "updated_at": "2018-04-02T00:19:12Z", + "user": "fe43e41739154b72818565e0d2580819" + } +} +` + +const PolicyUpdateBody = ` +{ + "policy": { + "created_at": "2018-04-02T21:43:30.000000", + "data": {}, + "domain": null, + "id": "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + "name": "delpol4", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "hooks": { + "params": { + "queue": "zaqar_queue_name" + }, + "timeout": 180, + "type": "zaqar" + } + }, + "type": "senlin.policy.deletion", + "version": 1.1 + }, + "type": "senlin.policy.deletion-1.1", + "updated_at": null, + "user": "fe43e41739154b72818565e0d2580819" + } +} +` + +const PolicyBadUpdateBody = ` +{ + "policy": { + "created_at": "invalid", + "data": {}, + "domain": null, + "id": "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + "name": "delpol4", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "hooks": { + "params": { + "queue": "zaqar_queue_name" + }, + "timeout": 180, + "type": "zaqar" + } + }, + "type": "senlin.policy.deletion", + "version": 1.1 + }, + "type": "invalid", + "updated_at": null, + "user": "fe43e41739154b72818565e0d2580819" + } +} +` + +const PolicyValidateBody = ` +{ + "policy": { + "created_at": "2018-04-02T21:43:30.000000", + "data": {}, + "domain": null, + "id": "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + "name": "delpol4", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "hooks": { + "params": { + "queue": "zaqar_queue_name" + }, + "timeout": 180, + "type": "zaqar" + } + }, + "type": "senlin.policy.deletion", + "version": 1.1 + }, + "type": "senlin.policy.deletion-1.1", + "updated_at": null, + "user": "fe43e41739154b72818565e0d2580819" + } +} +` + +const PolicyBadValidateBody = ` +{ + "policy": { + "created_at": "invalid", + "data": {}, + "domain": null, + "id": "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + "name": "delpol4", + "project": "018cd0909fb44cd5bc9b7a3cd664920e", + "spec": { + "description": "A policy for choosing victim node(s) from a cluster for deletion.", + "properties": { + "hooks": { + "params": { + "queue": "zaqar_queue_name" + }, + "timeout": 180, + "type": "zaqar" + } + }, + "type": "senlin.policy.deletion", + "version": 1.1 + }, + "type": "invalid", + "updated_at": null, + "user": "fe43e41739154b72818565e0d2580819" + } +} +` + +const PolicyDeleteRequestID = "req-7328d1b0-9945-456f-b2cd-5166b77d14a8" +const PolicyIDtoUpdate = "b99b3ab4-3aa6-4fba-b827-69b88b9c544a" +const PolicyIDtoGet = "b99b3ab4-3aa6-4fba-b827-69b88b9c544a" +const PolicyIDtoDelete = "1" + +var ExpectedPolicy1 = policies.Policy{ + CreatedAt: time.Date(2018, 4, 2, 21, 43, 30, 0, time.UTC), + Data: map[string]interface{}{}, + Domain: "", + ID: 
"PolicyListBodyID1", + Name: "delpol", + Project: "018cd0909fb44cd5bc9b7a3cd664920e", + + Spec: policies.Spec{ + Description: "A policy for choosing victim node(s) from a cluster for deletion.", + Properties: map[string]interface{}{ + "criteria": "OLDEST_FIRST", + "destroy_after_deletion": true, + "grace_period": float64(60), + "reduce_desired_capacity": false, + }, + Type: "senlin.policy.deletion", + Version: "1.0", + }, + Type: "senlin.policy.deletion-1.0", + User: "fe43e41739154b72818565e0d2580819", + UpdatedAt: time.Date(2018, 4, 2, 0, 19, 12, 0, time.UTC), +} + +var ExpectedPolicy2 = policies.Policy{ + CreatedAt: time.Date(2018, 4, 2, 22, 29, 36, 0, time.UTC), + Data: map[string]interface{}{}, + Domain: "", + ID: "PolicyListBodyID2", + Name: "delpol2", + Project: "018cd0909fb44cd5bc9b7a3cd664920e", + + Spec: policies.Spec{ + Description: "A policy for choosing victim node(s) from a cluster for deletion.", + Properties: map[string]interface{}{ + "criteria": "OLDEST_FIRST", + "destroy_after_deletion": true, + "grace_period": float64(60), + "reduce_desired_capacity": false, + }, + Type: "senlin.policy.deletion", + Version: "1.0", + }, + Type: "senlin.policy.deletion-1.0", + User: "fe43e41739154b72818565e0d2580819", + UpdatedAt: time.Date(2018, 4, 2, 23, 15, 11, 0, time.UTC), +} + +var ExpectedPolicies = [][]policies.Policy{ + []policies.Policy{ExpectedPolicy1}, + []policies.Policy{ExpectedPolicy2}, +} + +var ExpectedCreatePolicy = policies.Policy{ + CreatedAt: time.Date(2018, 4, 4, 0, 18, 36, 0, time.UTC), + Data: map[string]interface{}{}, + Domain: "", + ID: "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + Name: "delpol4", + Project: "018cd0909fb44cd5bc9b7a3cd664920e", + + Spec: policies.Spec{ + Description: "A policy for choosing victim node(s) from a cluster for deletion.", + Properties: map[string]interface{}{ + "hooks": map[string]interface{}{ + "params": map[string]interface{}{ + "queue": "zaqar_queue_name", + }, + "timeout": float64(180), + "type": "zaqar", + }, + }, + Type: "senlin.policy.deletion", + Version: "1.1", + }, + Type: "senlin.policy.deletion-1.1", + User: "fe43e41739154b72818565e0d2580819", +} + +var ExpectedGetPolicy = policies.Policy{ + CreatedAt: time.Date(2018, 4, 2, 21, 43, 30, 0, time.UTC), + Data: map[string]interface{}{}, + Domain: "", + ID: "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + Name: "delpol", + Project: "018cd0909fb44cd5bc9b7a3cd664920e", + + Spec: policies.Spec{ + Description: "A policy for choosing victim node(s) from a cluster for deletion.", + Properties: map[string]interface{}{ + "criteria": "OLDEST_FIRST", + "destroy_after_deletion": true, + "grace_period": float64(60), + "reduce_desired_capacity": false, + }, + Type: "senlin.policy.deletion", + Version: "1.0", + }, + Type: "senlin.policy.deletion-1.0", + User: "fe43e41739154b72818565e0d2580819", + UpdatedAt: time.Date(2018, 4, 2, 0, 19, 12, 0, time.UTC), +} + +var ExpectedUpdatePolicy = policies.Policy{ + CreatedAt: time.Date(2018, 4, 2, 21, 43, 30, 0, time.UTC), + Data: map[string]interface{}{}, + Domain: "", + ID: "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + Name: "delpol4", + Project: "018cd0909fb44cd5bc9b7a3cd664920e", + + Spec: policies.Spec{ + Description: "A policy for choosing victim node(s) from a cluster for deletion.", + Properties: map[string]interface{}{ + "hooks": map[string]interface{}{ + "params": map[string]interface{}{ + "queue": "zaqar_queue_name", + }, + "timeout": float64(180), + "type": "zaqar", + }, + }, + Type: "senlin.policy.deletion", + Version: "1.1", + }, + Type: 
"senlin.policy.deletion-1.1", + User: "fe43e41739154b72818565e0d2580819", +} + +var ExpectedValidatePolicy = policies.Policy{ + CreatedAt: time.Date(2018, 4, 2, 21, 43, 30, 0, time.UTC), + Data: map[string]interface{}{}, + Domain: "", + ID: "b99b3ab4-3aa6-4fba-b827-69b88b9c544a", + Name: "delpol4", + Project: "018cd0909fb44cd5bc9b7a3cd664920e", + + Spec: policies.Spec{ + Description: "A policy for choosing victim node(s) from a cluster for deletion.", + Properties: map[string]interface{}{ + "hooks": map[string]interface{}{ + "params": map[string]interface{}{ + "queue": "zaqar_queue_name", + }, + "timeout": float64(180), + "type": "zaqar", + }, + }, + Type: "senlin.policy.deletion", + Version: "1.1", + }, + Type: "senlin.policy.deletion-1.1", + User: "fe43e41739154b72818565e0d2580819", +} + +func HandlePolicyList(t *testing.T) { + th.Mux.HandleFunc("/v1/policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, PolicyListBody1) + case "PolicyListBodyID1": + fmt.Fprintf(w, PolicyListBody2) + case "PolicyListBodyID2": + fmt.Fprintf(w, `{"policies":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +func HandlePolicyCreate(t *testing.T) { + th.Mux.HandleFunc("/v1/policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, PolicyCreateBody) + }) +} + +func HandlePolicyDelete(t *testing.T) { + th.Mux.HandleFunc("/v1/policies/"+PolicyIDtoDelete, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("X-OpenStack-Request-Id", PolicyDeleteRequestID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func HandlePolicyGet(t *testing.T) { + th.Mux.HandleFunc("/v1/policies/"+PolicyIDtoGet, + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyGetBody) + }) +} + +func HandlePolicyUpdate(t *testing.T) { + th.Mux.HandleFunc("/v1/policies/"+PolicyIDtoUpdate, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyUpdateBody) + }) +} + +func HandleBadPolicyUpdate(t *testing.T) { + th.Mux.HandleFunc("/v1/policies/"+PolicyIDtoUpdate, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyBadUpdateBody) + }) +} + +func HandlePolicyValidate(t *testing.T) { + th.Mux.HandleFunc("/v1/policies/validate", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyValidateBody) + }) +} + +func HandleBadPolicyValidate(t *testing.T) 
{ + th.Mux.HandleFunc("/v1/policies/validate", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyBadValidateBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/requests_test.go new file mode 100644 index 000000000000..7bf3a527ee51 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/testing/requests_test.go @@ -0,0 +1,148 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policies" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListPolicies(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePolicyList(t) + + listOpts := policies.ListOpts{ + Limit: 1, + } + + count := 0 + err := policies.List(fake.ServiceClient(), listOpts).EachPage(func(page pagination.Page) (bool, error) { + actual, err := policies.ExtractPolicies(page) + if err != nil { + t.Errorf("Failed to extract policies: %v", err) + return false, err + } + + th.AssertDeepEquals(t, ExpectedPolicies[count], actual) + count++ + + return true, nil + }) + + th.AssertNoErr(t, err) + + if count != 2 { + t.Errorf("Expected 2 pages, got %d", count) + } +} + +func TestCreatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePolicyCreate(t) + + expected := ExpectedCreatePolicy + + opts := policies.CreateOpts{ + Name: ExpectedCreatePolicy.Name, + Spec: ExpectedCreatePolicy.Spec, + } + + actual, err := policies.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func TestDeletePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePolicyDelete(t) + + res := policies.Delete(fake.ServiceClient(), PolicyIDtoDelete) + th.AssertNoErr(t, res.ExtractErr()) + + requestID := res.Header["X-Openstack-Request-Id"][0] + th.AssertEquals(t, PolicyDeleteRequestID, requestID) +} + +func TestUpdatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePolicyUpdate(t) + + expected := ExpectedUpdatePolicy + + opts := policies.UpdateOpts{ + Name: ExpectedUpdatePolicy.Name, + } + + actual, err := policies.Update(fake.ServiceClient(), PolicyIDtoUpdate, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func TestBadUpdatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleBadPolicyUpdate(t) + + opts := policies.UpdateOpts{ + Name: ExpectedUpdatePolicy.Name, + } + + _, err := policies.Update(fake.ServiceClient(), PolicyIDtoUpdate, opts).Extract() + th.AssertEquals(t, false, err == nil) +} + +func TestValidatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePolicyValidate(t) + + expected := ExpectedValidatePolicy + + opts := policies.ValidateOpts{ + Spec: ExpectedValidatePolicy.Spec, + } + + actual, err := policies.Validate(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &expected, actual) +} + +func TestBadValidatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleBadPolicyValidate(t) + + 
opts := policies.ValidateOpts{
+		Spec: ExpectedValidatePolicy.Spec,
+	}
+
+	_, err := policies.Validate(fake.ServiceClient(), opts).Extract()
+	th.AssertEquals(t, false, err == nil)
+}
+
+func TestGetPolicy(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+
+	HandlePolicyGet(t)
+
+	actual, err := policies.Get(fake.ServiceClient(), PolicyIDtoGet).Extract()
+	th.AssertNoErr(t, err)
+
+	th.AssertDeepEquals(t, ExpectedGetPolicy, *actual)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/urls.go
new file mode 100644
index 000000000000..d558ab0dcc07
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policies/urls.go
@@ -0,0 +1,32 @@
+package policies
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	apiVersion = "v1"
+	apiName = "policies"
+)
+
+func policyListURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL(apiVersion, apiName)
+}
+
+func policyCreateURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL(apiVersion, apiName)
+}
+
+func policyDeleteURL(client *gophercloud.ServiceClient, policyID string) string {
+	return client.ServiceURL(apiVersion, apiName, policyID)
+}
+
+func policyGetURL(client *gophercloud.ServiceClient, policyID string) string {
+	return client.ServiceURL(apiVersion, apiName, policyID)
+}
+
+func updateURL(client *gophercloud.ServiceClient, policyID string) string {
+	return client.ServiceURL(apiVersion, apiName, policyID)
+}
+
+func validateURL(client *gophercloud.ServiceClient) string {
+	return client.ServiceURL(apiVersion, apiName, "validate")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/doc.go
new file mode 100644
index 000000000000..2b1b6d686021
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/doc.go
@@ -0,0 +1,31 @@
+/*
+Package policytypes lists all policy types and shows details for a policy type
+from the OpenStack Clustering Service.
+
+Example to List Policy Types
+
+	allPages, err := policytypes.List(clusteringClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicyTypes, err := policytypes.ExtractPolicyTypes(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, policyType := range allPolicyTypes {
+		fmt.Printf("%+v\n", policyType)
+	}
+
+Example to Get a Policy Type
+
+	policyTypeName := "senlin.policy.affinity-1.0"
+	policyTypeDetail, err := policytypes.Get(clusteringClient, policyTypeName).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%+v\n", policyTypeDetail)
+*/
+package policytypes
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/requests.go
new file mode 100644
index 000000000000..ca9a5a3be1f0
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/requests.go
@@ -0,0 +1,26 @@
+package policytypes
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List makes a request against the API to list policy types.
+func List(client *gophercloud.ServiceClient) pagination.Pager { + url := policyTypeListURL(client) + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return PolicyTypePage{pagination.SinglePageBase(r)} + }) +} + +// Get makes a request against the API to get details for a policy type. +func Get(client *gophercloud.ServiceClient, policyTypeName string) (r GetResult) { + url := policyTypeGetURL(client, policyTypeName) + + _, r.Err = client.Get(url, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/results.go new file mode 100644 index 000000000000..98c1e53cb7ab --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/results.go @@ -0,0 +1,68 @@ +package policytypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// PolicyType represents a clustering policy type in the Openstack cloud. +type PolicyType struct { + Name string `json:"name"` + Version string `json:"version"` + SupportStatus map[string][]SupportStatusType `json:"support_status"` +} + +// SupportStatusType represents the support status information for a +// clustering policy type. +type SupportStatusType struct { + Status string `json:"status"` + Since string `json:"since"` +} + +// PolicyTypeDetail represents the detailed policy type information for a +// clustering policy type. +type PolicyTypeDetail struct { + Name string `json:"name"` + Schema map[string]interface{} `json:"schema"` + SupportStatus map[string][]SupportStatusType `json:"support_status,omitempty"` +} + +// policyTypeResult is the base result of a Policy Type operation. +type policyTypeResult struct { + gophercloud.Result +} + +// Extract interprets any policyTypeResult result as a PolicyTypeDetail. +func (r policyTypeResult) Extract() (*PolicyTypeDetail, error) { + var s struct { + PolicyType *PolicyTypeDetail `json:"policy_type"` + } + err := r.ExtractInto(&s) + return s.PolicyType, err +} + +// GetResult is the result of a Get operation. Call its Extract method to +// interpret it as a PolicyTypeDetail. +type GetResult struct { + policyTypeResult +} + +// PolicyTypePage contains a single page of all policy types from a List call. +type PolicyTypePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines if a PolicyType contains any results. +func (page PolicyTypePage) IsEmpty() (bool, error) { + policyTypes, err := ExtractPolicyTypes(page) + return len(policyTypes) == 0, err +} + +// ExtractPolicyTypes returns a slice of PolicyTypes from a List operation. 
+func ExtractPolicyTypes(r pagination.Page) ([]PolicyType, error) { + var s struct { + PolicyTypes []PolicyType `json:"policy_types"` + } + err := (r.(PolicyTypePage)).ExtractInto(&s) + return s.PolicyTypes, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/doc.go new file mode 100644 index 000000000000..5bd30a47049b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_policytypes_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/fixtures.go new file mode 100644 index 000000000000..2732dd9ebfe8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/fixtures.go @@ -0,0 +1,235 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const FakePolicyTypetoGet = "fake-policytype" + +const PolicyTypeBody = ` +{ + "policy_types": [ + { + "name": "senlin.policy.affinity", + "version": "1.0", + "support_status": { + "1.0": [ + { + "status": "SUPPORTED", + "since": "2016.10" + } + ] + } + }, + { + "name": "senlin.policy.health", + "version": "1.0", + "support_status": { + "1.0": [ + { + "status": "EXPERIMENTAL", + "since": "2016.10" + } + ] + } + }, + { + "name": "senlin.policy.scaling", + "version": "1.0", + "support_status": { + "1.0": [ + { + "status": "SUPPORTED", + "since": "2016.04" + } + ] + } + }, + { + "name": "senlin.policy.region_placement", + "version": "1.0", + "support_status": { + "1.0": [ + { + "status": "EXPERIMENTAL", + "since": "2016.04" + }, + { + "status": "SUPPORTED", + "since": "2016.10" + } + ] + } + } + ] +} +` + +const PolicyTypeDetailBody = ` +{ + "policy_type": { + "name": "senlin.policy.batch-1.0", + "schema": { + "max_batch_size": { + "default": -1, + "description": "Maximum number of nodes that will be updated in parallel.", + "required": false, + "type": "Integer", + "updatable": false + }, + "min_in_service": { + "default": 1, + "description": "Minimum number of nodes in service when performing updates.", + "required": false, + "type": "Integer", + "updatable": false + }, + "pause_time": { + "default": 60, + "description": "Interval in seconds between update batches if any.", + "required": false, + "type": "Integer", + "updatable": false + } + }, + "support_status": { + "1.0": [ + { + "status": "EXPERIMENTAL", + "since": "2017.02" + } + ] + } + } +} +` + +var ExpectedPolicyType1 = policytypes.PolicyType{ + Name: "senlin.policy.affinity", + Version: "1.0", + SupportStatus: map[string][]policytypes.SupportStatusType{ + "1.0": { + { + Status: "SUPPORTED", + Since: "2016.10", + }, + }, + }, +} + +var ExpectedPolicyType2 = policytypes.PolicyType{ + Name: "senlin.policy.health", + Version: "1.0", + SupportStatus: map[string][]policytypes.SupportStatusType{ + "1.0": { + { + Status: "EXPERIMENTAL", + Since: "2016.10", + }, + }, + }, +} + +var ExpectedPolicyType3 = policytypes.PolicyType{ + Name: "senlin.policy.scaling", + Version: "1.0", + SupportStatus: map[string][]policytypes.SupportStatusType{ + "1.0": { + { + 
Status: "SUPPORTED", + Since: "2016.04", + }, + }, + }, +} + +var ExpectedPolicyType4 = policytypes.PolicyType{ + Name: "senlin.policy.region_placement", + Version: "1.0", + SupportStatus: map[string][]policytypes.SupportStatusType{ + "1.0": { + { + Status: "EXPERIMENTAL", + Since: "2016.04", + }, + { + Status: "SUPPORTED", + Since: "2016.10", + }, + }, + }, +} + +var ExpectedPolicyTypes = []policytypes.PolicyType{ + ExpectedPolicyType1, + ExpectedPolicyType2, + ExpectedPolicyType3, + ExpectedPolicyType4, +} + +var ExpectedPolicyTypeDetail = &policytypes.PolicyTypeDetail{ + Name: "senlin.policy.batch-1.0", + Schema: map[string]interface{}{ + "max_batch_size": map[string]interface{}{ + "default": float64(-1), + "description": "Maximum number of nodes that will be updated in parallel.", + "required": false, + "type": "Integer", + "updatable": false, + }, + "min_in_service": map[string]interface{}{ + "default": float64(1), + "description": "Minimum number of nodes in service when performing updates.", + "required": false, + "type": "Integer", + "updatable": false, + }, + "pause_time": map[string]interface{}{ + "default": float64(60), + "description": "Interval in seconds between update batches if any.", + "required": false, + "type": "Integer", + "updatable": false, + }, + }, + SupportStatus: map[string][]policytypes.SupportStatusType{ + "1.0": []policytypes.SupportStatusType{ + { + Status: "EXPERIMENTAL", + Since: "2017.02", + }, + }, + }, +} + +func HandlePolicyTypeList(t *testing.T) { + th.Mux.HandleFunc("/v1/policy-types", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyTypeBody) + }) +} + +func HandlePolicyTypeGet(t *testing.T) { + th.Mux.HandleFunc("/v1/policy-types/"+FakePolicyTypetoGet, + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, PolicyTypeDetailBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/requests_test.go new file mode 100644 index 000000000000..87e42fab1ac8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/testing/requests_test.go @@ -0,0 +1,49 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListPolicyTypes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePolicyTypeList(t) + + count := 0 + err := policytypes.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := policytypes.ExtractPolicyTypes(page) + if err != nil { + t.Errorf("Failed to extract policy types: %v", err) + return false, err + } + th.AssertDeepEquals(t, ExpectedPolicyTypes, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGetPolicyType(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
HandlePolicyTypeGet(t) + + actual, err := policytypes.Get(fake.ServiceClient(), FakePolicyTypetoGet).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedPolicyTypeDetail, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/urls.go new file mode 100644 index 000000000000..b291a95c701b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/policytypes/urls.go @@ -0,0 +1,16 @@ +package policytypes + +import "github.com/gophercloud/gophercloud" + +const ( + apiVersion = "v1" + apiName = "policy-types" +) + +func policyTypeListURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func policyTypeGetURL(client *gophercloud.ServiceClient, policyTypeName string) string { + return client.ServiceURL(apiVersion, apiName, policyTypeName) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/doc.go new file mode 100644 index 000000000000..13676df8c174 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/doc.go @@ -0,0 +1,85 @@ +/* +Package profiles provides information and interaction with profiles through +the OpenStack Clustering service. + +Example to Create a Profile + + networks := []map[string]interface{} { + {"network": "test-network"}, + } + + props := map[string]interface{}{ + "name": "test_gophercloud_profile", + "flavor": "t2.micro", + "image": "centos7.3-latest", + "networks": networks, + "security_groups": "", + } + + createOpts := profiles.CreateOpts { + Name: "test_profile", + Spec: profiles.Spec{ + Type: "os.nova.server", + Version: "1.0", + Properties: props, + }, + } + + profile, err := profiles.Create(serviceClient, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Println("Profile", profile) + +Example to Get a Profile + + profile, err := profiles.Get(serviceClient, "profile-name").Extract() + if err != nil { + panic(err) + } + + fmt.Print("profile", profile) + + +Example to List Profiles + + listOpts := profiles.ListOpts{ + Limit: 2, + } + + profiles.List(serviceClient, listOpts).EachPage(func(page pagination.Page) (bool, error) { + allProfiles, err := profiles.ExtractProfiles(page) + if err != nil { + panic(err) + } + + for _, profile := range allProfiles { + fmt.Printf("%+v\n", profile) + } + return true, nil + }) + +Example to Update a Profile + + updateOpts := profiles.UpdateOpts{ + Name: "new-name", + } + + profile, err := profiles.Update(serviceClient, profileName, updateOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Print("profile", profile) + +Example to Delete a Profile + + profileID := "6dc6d336e3fc4c0a951b5698cd1236ee" + err := profiles.Delete(serviceClient, profileID).ExtractErr() + if err != nil { + panic(err) + } + +*/ +package profiles diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/requests.go new file mode 100644 index 000000000000..5fb10f8159d5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/requests.go @@ -0,0 +1,139 @@ +package profiles + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// 
CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToProfileCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used for creating a profile. +type CreateOpts struct { + Name string `json:"name" required:"true"` + Metadata map[string]interface{} `json:"metadata,omitempty"` + Spec Spec `json:"spec" required:"true"` +} + +// ToProfileCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToProfileCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "profile") +} + +// Create requests the creation of a new profile on the server. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToProfileCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} + +// Get retrieves detail of a single profile. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + var result *http.Response + result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToProfileListQuery() (string, error) +} + +// ListOpts represents options used to list profiles. +type ListOpts struct { + GlobalProject *bool `q:"global_project"` + Limit int `q:"limit"` + Marker string `q:"marker"` + Name string `q:"name"` + Sort string `q:"sort"` + Type string `q:"type"` +} + +// ToProfileListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToProfileListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List instructs OpenStack to provide a list of profiles. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToProfileListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ProfilePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToProfileUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options used to update a profile. +type UpdateOpts struct { + Metadata map[string]interface{} `json:"metadata,omitempty"` + Name string `json:"name,omitempty"` +} + +// ToProfileUpdateMap constructs a request body from UpdateOpts. +func (opts UpdateOpts) ToProfileUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "profile") +} + +// Update updates a profile. 
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToProfileUpdateMap() + if err != nil { + r.Err = err + return r + } + var result *http.Response + result, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + if r.Err == nil { + r.Header = result.Header + } + return +} + +// Delete deletes the specified profile via profile id. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + var result *http.Response + result, r.Err = client.Delete(deleteURL(client, id), nil) + if r.Err == nil { + r.Header = result.Header + } + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/results.go new file mode 100644 index 000000000000..30bacadfee00 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/results.go @@ -0,0 +1,148 @@ +package profiles + +import ( + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Profile represent a detailed profile. +type Profile struct { + CreatedAt time.Time `json:"-"` + Domain string `json:"domain"` + ID string `json:"id"` + Metadata map[string]interface{} `json:"metadata"` + Name string `json:"name"` + Project string `json:"project"` + Spec Spec `json:"spec"` + Type string `json:"type"` + UpdatedAt time.Time `json:"-"` + User string `json:"user"` +} + +func (r *Profile) UnmarshalJSON(b []byte) error { + type tmp Profile + var s struct { + tmp + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Profile(s.tmp) + + if s.CreatedAt != "" { + r.CreatedAt, err = time.Parse(time.RFC3339, s.CreatedAt) + if err != nil { + return err + } + } + + if s.UpdatedAt != "" { + r.UpdatedAt, err = time.Parse(time.RFC3339, s.UpdatedAt) + if err != nil { + return err + } + } + + return nil +} + +// Spec represents a profile spec. +type Spec struct { + Type string `json:"type"` + Version string `json:"-"` + Properties map[string]interface{} `json:"properties"` +} + +func (r *Spec) UnmarshalJSON(b []byte) error { + type tmp Spec + var s struct { + tmp + Version interface{} `json:"version"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Spec(s.tmp) + + switch t := s.Version.(type) { + case float64: + if t == 1 { + r.Version = fmt.Sprintf("%.1f", t) + } else { + r.Version = strconv.FormatFloat(t, 'f', -1, 64) + } + case string: + r.Version = t + } + + return nil +} + +// commonResult is the base result of a Profile operation. +type commonResult struct { + gophercloud.Result +} + +// Extract provides access to Profile returned by the Get and Create functions. +func (r commonResult) Extract() (*Profile, error) { + var s struct { + Profile *Profile `json:"profile"` + } + err := r.ExtractInto(&s) + return s.Profile, err +} + +// CreateResult is the result of a Create operation. Call its Extract +// method to interpret it as a Profile. +type CreateResult struct { + commonResult +} + +// GetResult is the result of a Get operations. Call its Extract +// method to interpret it as a Profile. +type GetResult struct { + commonResult +} + +// UpdateResult is the result of a Update operations. Call its Extract +// method to interpret it as a Profile. 
+type UpdateResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ProfilePage contains a single page of all profiles from a List operation. +type ProfilePage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a ProfilePage contains any results. +func (page ProfilePage) IsEmpty() (bool, error) { + profiles, err := ExtractProfiles(page) + return len(profiles) == 0, err +} + +// ExtractProfiles returns a slice of Profiles from the List operation. +func ExtractProfiles(r pagination.Page) ([]Profile, error) { + var s struct { + Profiles []Profile `json:"profiles"` + } + err := (r.(ProfilePage)).ExtractInto(&s) + return s.Profiles, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/doc.go new file mode 100644 index 000000000000..2a99a8a64b3e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_profiles_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/fixtures.go new file mode 100644 index 000000000000..f989fc3ac54f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/fixtures.go @@ -0,0 +1,395 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const CreateResponse = ` +{ + "profile": { + "created_at": "2016-01-03T16:22:23Z", + "domain": null, + "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + "metadata": {}, + "name": "test-profile", + "project": "42d9e9663331431f97b75e25136307ff", + "spec": { + "properties": { + "flavor": "t2.small", + "image": "centos7.3-latest", + "name": "centos_server", + "networks": [ + { + "network": "private-network" + } + ] + }, + "type": "os.nova.server", + "version": "1.0" + }, + "type": "os.nova.server-1.0", + "updated_at": "2016-01-03T17:22:23Z", + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +var ExpectedCreate = profiles.Profile{ + CreatedAt: time.Date(2016, 1, 3, 16, 22, 23, 0, time.UTC), + Domain: "", + ID: "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + Metadata: map[string]interface{}{}, + Name: "test-profile", + Project: "42d9e9663331431f97b75e25136307ff", + Spec: profiles.Spec{ + Properties: map[string]interface{}{ + "flavor": "t2.small", + "image": "centos7.3-latest", + "name": "centos_server", + "networks": []interface{}{ + map[string]interface{}{"network": "private-network"}, + }, + }, + Type: "os.nova.server", + Version: "1.0", + }, + Type: "os.nova.server-1.0", + UpdatedAt: time.Date(2016, 1, 3, 17, 22, 23, 0, time.UTC), + User: "5e5bf8027826429c96af157f68dc9072", +} + +const GetResponse = ` +{ + "profile": { + "created_at": "2016-01-03T16:22:23Z", + "domain": null, + "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + "metadata": {}, + "name": "pserver", + "project": "42d9e9663331431f97b75e25136307ff", + "spec": { + "properties": { + "flavor": 1, + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + 
"name": "cirros_server", + "networks": [ + { + "network": "private" + } + ] + }, + "type": "os.nova.server", + "version": "1.0" + }, + "type": "os.nova.server-1.0", + "updated_at": null, + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +var ExpectedGet = profiles.Profile{ + CreatedAt: time.Date(2016, 1, 3, 16, 22, 23, 0, time.UTC), + Domain: "", + ID: "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + Metadata: map[string]interface{}{}, + Name: "pserver", + Project: "42d9e9663331431f97b75e25136307ff", + Spec: profiles.Spec{ + Properties: map[string]interface{}{ + "flavor": float64(1), + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": []interface{}{ + map[string]interface{}{"network": "private"}, + }, + }, + Type: "os.nova.server", + Version: "1.0", + }, + Type: "os.nova.server-1.0", + User: "5e5bf8027826429c96af157f68dc9072", +} + +const ListResponse = ` +{ + "profiles": [ + { + "created_at": "2016-01-03T16:22:23Z", + "domain": null, + "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + "metadata": {}, + "name": "pserver", + "project": "42d9e9663331431f97b75e25136307ff", + "spec": { + "properties": { + "flavor": "t2.small", + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": [ + { + "network": "private" + } + ] + }, + "type": "os.nova.server", + "version": 1.0 + }, + "type": "os.nova.server-1.0", + "updated_at": "2016-01-03T17:22:23Z", + "user": "5e5bf8027826429c96af157f68dc9072" + }, + { + "created_at": null, + "domain": null, + "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + "metadata": {}, + "name": "pserver", + "project": "42d9e9663331431f97b75e25136307ff", + "spec": { + "properties": { + "flavor": "t2.small", + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": [ + { + "network": "private" + } + ] + }, + "type": "os.nova.server", + "version": 1.0 + }, + "type": "os.nova.server-1.0", + "updated_at": null, + "user": "5e5bf8027826429c96af157f68dc9072" + }, + { + "created_at": "", + "domain": null, + "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + "metadata": {}, + "name": "pserver", + "project": "42d9e9663331431f97b75e25136307ff", + "spec": { + "properties": { + "flavor": "t2.small", + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": [ + { + "network": "private" + } + ] + }, + "type": "os.nova.server", + "version": "1.0" + }, + "type": "os.nova.server-1.0", + "updated_at": "", + "user": "5e5bf8027826429c96af157f68dc9072" + } + ] +}` + +var ExpectedListProfile1 = profiles.Profile{ + CreatedAt: time.Date(2016, 1, 3, 16, 22, 23, 0, time.UTC), + Domain: "", + ID: "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + Metadata: map[string]interface{}{}, + Name: "pserver", + Project: "42d9e9663331431f97b75e25136307ff", + Spec: profiles.Spec{ + Properties: map[string]interface{}{ + "flavor": "t2.small", + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": []interface{}{ + map[string]interface{}{"network": "private"}, + }, + }, + Type: "os.nova.server", + Version: "1.0", + }, + Type: "os.nova.server-1.0", + UpdatedAt: time.Date(2016, 1, 3, 17, 22, 23, 0, time.UTC), + User: "5e5bf8027826429c96af157f68dc9072", +} + +var ExpectedListProfile2 = profiles.Profile{ + Domain: "", + ID: "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + Metadata: map[string]interface{}{}, + Name: "pserver", + Project: "42d9e9663331431f97b75e25136307ff", + Spec: profiles.Spec{ + Properties: 
map[string]interface{}{ + "flavor": "t2.small", + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": []interface{}{ + map[string]interface{}{"network": "private"}, + }, + }, + Type: "os.nova.server", + Version: "1.0", + }, + Type: "os.nova.server-1.0", + User: "5e5bf8027826429c96af157f68dc9072", +} + +var ExpectedListProfile3 = profiles.Profile{ + Domain: "", + ID: "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + Metadata: map[string]interface{}{}, + Name: "pserver", + Project: "42d9e9663331431f97b75e25136307ff", + Spec: profiles.Spec{ + Properties: map[string]interface{}{ + "flavor": "t2.small", + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": []interface{}{ + map[string]interface{}{"network": "private"}, + }, + }, + Type: "os.nova.server", + Version: "1.0", + }, + Type: "os.nova.server-1.0", + User: "5e5bf8027826429c96af157f68dc9072", +} + +var ExpectedList = []profiles.Profile{ + ExpectedListProfile1, + ExpectedListProfile2, + ExpectedListProfile3, +} + +const UpdateResponse = ` +{ + "profile": { + "created_at": "2016-01-03T16:22:23Z", + "domain": null, + "id": "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + "metadata": { + "foo": "bar" + }, + "name": "pserver", + "project": "42d9e9663331431f97b75e25136307ff", + "spec": { + "properties": { + "flavor": 1, + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": [ + { + "network": "private" + } + ] + }, + "type": "os.nova.server", + "version": "1.0" + }, + "type": "os.nova.server-1.0", + "updated_at": "2016-01-03T17:22:23Z", + "user": "5e5bf8027826429c96af157f68dc9072" + } +}` + +var ExpectedUpdate = profiles.Profile{ + CreatedAt: time.Date(2016, 1, 3, 16, 22, 23, 0, time.UTC), + Domain: "", + ID: "9e1c6f42-acf5-4688-be2c-8ce954ef0f23", + Metadata: map[string]interface{}{"foo": "bar"}, + Name: "pserver", + Project: "42d9e9663331431f97b75e25136307ff", + Spec: profiles.Spec{ + Properties: map[string]interface{}{ + "flavor": float64(1), + "image": "cirros-0.3.4-x86_64-uec", + "key_name": "oskey", + "name": "cirros_server", + "networks": []interface{}{ + map[string]interface{}{"network": "private"}, + }, + }, + Type: "os.nova.server", + Version: "1.0", + }, + Type: "os.nova.server-1.0", + UpdatedAt: time.Date(2016, 1, 3, 17, 22, 23, 0, time.UTC), + User: "5e5bf8027826429c96af157f68dc9072", +} + +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/profiles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, CreateResponse) + }) +} + +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/profiles/9e1c6f42-acf5-4688-be2c-8ce954ef0f23", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, GetResponse) + }) +} + +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/profiles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) +} + +func HandleUpdateSuccessfully(t *testing.T) { + 
th.Mux.HandleFunc("/v1/profiles/9e1c6f42-acf5-4688-be2c-8ce954ef0f23", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, UpdateResponse) + }) +} + +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/profiles/6dc6d336e3fc4c0a951b5698cd1236ee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/requests_test.go new file mode 100644 index 000000000000..1793c4bf1f9c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/testing/requests_test.go @@ -0,0 +1,109 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateProfile(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateSuccessfully(t) + + networks := []map[string]interface{}{ + {"network": "private-network"}, + } + + props := map[string]interface{}{ + "name": "test_gopher_cloud_profile", + "flavor": "t2.small", + "image": "centos7.3-latest", + "networks": networks, + "security_groups": "", + } + + createOpts := &profiles.CreateOpts{ + Name: "TestProfile", + Spec: profiles.Spec{ + Type: "os.nova.server", + Version: "1.0", + Properties: props, + }, + } + + profile, err := profiles.Create(fake.ServiceClient(), createOpts).Extract() + if err != nil { + t.Errorf("Failed to extract profile: %v", err) + } + + th.AssertDeepEquals(t, ExpectedCreate, *profile) +} + +func TestGetProfile(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetSuccessfully(t) + + actual, err := profiles.Get(fake.ServiceClient(), ExpectedGet.ID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedGet, *actual) +} + +func TestListProfiles(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListSuccessfully(t) + + var iFalse bool + listOpts := profiles.ListOpts{ + GlobalProject: &iFalse, + } + + count := 0 + err := profiles.List(fake.ServiceClient(), listOpts).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := profiles.ExtractProfiles(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedList, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if count != 1 { + t.Errorf("Expected 1 page of profiles, got %d pages instead", count) + } +} + +func TestUpdateProfile(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateSuccessfully(t) + + updateOpts := profiles.UpdateOpts{ + Name: "pserver", + } + + actual, err := profiles.Update(fake.ServiceClient(), ExpectedUpdate.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedUpdate, *actual) +} + +func TestDeleteProfile(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteSuccessfully(t) + + deleteResult := profiles.Delete(fake.ServiceClient(), "6dc6d336e3fc4c0a951b5698cd1236ee") + 
th.AssertNoErr(t, deleteResult.ExtractErr()) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/urls.go new file mode 100644 index 000000000000..060d91ddc2f0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiles/urls.go @@ -0,0 +1,34 @@ +package profiles + +import "github.com/gophercloud/gophercloud" + +var apiVersion = "v1" +var apiName = "profiles" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/doc.go new file mode 100644 index 000000000000..39877ace88e1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/doc.go @@ -0,0 +1,47 @@ +/* +Package profiletypes lists all profile types and shows details for a profile type from the OpenStack +Clustering Service. + +Example to List ProfileType + + err = profiletypes.List(serviceClient).EachPage(func(page pagination.Page) (bool, error) { + profileTypes, err := profiletypes.ExtractProfileTypes(page) + if err != nil { + return false, err + } + + for _, profileType := range profileTypes { + fmt.Println("%+v\n", profileType) + } + return true, nil + }) + +Example to Get a ProfileType + + profileTypeName := "os.nova.server" + profileType, err := profiletypes.Get(clusteringClient, profileTypeName).Extract() + if err != nil { + panic(err) + } + fmt.Printf("%+v\n", profileType) + +Example of list operations supported by a profile type + serviceClient.Microversion = "1.5" + + profileTypeName := "os.nova.server-1.0" + allPages, err := profiletypes.ListOps(serviceClient, profileTypeName).AllPages() + if err != nil { + panic(err) + } + + ops, err := profiletypes.ExtractOps(allPages) + if err != nil { + panic(err) + } + + for _, op := range ops { + fmt.Printf("%+v\n", op) + } + +*/ +package profiletypes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/requests.go new file mode 100644 index 000000000000..37fe0313390f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/requests.go @@ -0,0 +1,28 @@ +package profiletypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, + &gophercloud.RequestOpts{OkCodes: []int{200}}) + + return +} + +// List makes a request against the API to list profile types. 
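+//
+// A minimal usage sketch, following the List example in this package's doc.go;
+// serviceClient is assumed to be an authenticated clustering service client:
+//
+//	err := profiletypes.List(serviceClient).EachPage(func(page pagination.Page) (bool, error) {
+//		profileTypes, err := profiletypes.ExtractProfileTypes(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, pt := range profileTypes {
+//			fmt.Printf("%+v\n", pt)
+//		}
+//		return true, nil
+//	})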
+func List(client *gophercloud.ServiceClient) pagination.Pager { + url := listURL(client) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ProfileTypePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +func ListOps(client *gophercloud.ServiceClient, id string) pagination.Pager { + url := listOpsURL(client, id) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return OperationPage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/results.go new file mode 100644 index 000000000000..3259f68ac3b6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/results.go @@ -0,0 +1,67 @@ +package profiletypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// commonResult is the response of a base result. +type commonResult struct { + gophercloud.Result +} + +// GetResult is the response of a Get operations. +type GetResult struct { + commonResult +} + +type Schema map[string]interface{} +type SupportStatus map[string]interface{} + +type ProfileType struct { + Name string `json:"name"` + Schema map[string]Schema `json:"schema"` + SupportStatus map[string][]SupportStatus `json:"support_status"` +} + +func (r commonResult) Extract() (*ProfileType, error) { + var s struct { + ProfileType *ProfileType `json:"profile_type"` + } + err := r.ExtractInto(&s) + return s.ProfileType, err +} + +// ExtractProfileTypes provides access to the list of profiles in a page acquired from the List operation. +func ExtractProfileTypes(r pagination.Page) ([]ProfileType, error) { + var s struct { + ProfileTypes []ProfileType `json:"profile_types"` + } + err := (r.(ProfileTypePage)).ExtractInto(&s) + return s.ProfileTypes, err +} + +// ProfileTypePage contains a single page of all profiles from a ListDetails call. +type ProfileTypePage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if ExtractProfileTypes contains any results. +func (page ProfileTypePage) IsEmpty() (bool, error) { + profileTypes, err := ExtractProfileTypes(page) + return len(profileTypes) == 0, err +} + +// OperationPage contains a single page of all profile type operations from a ListOps call. +type OperationPage struct { + pagination.SinglePageBase +} + +// ExtractOps provides access to the list of operations in a page acquired from the ListOps operation. 
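+//
+// A typical call site pairs ListOps with AllPages, as sketched in this
+// package's doc.go; serviceClient and the profile type name are illustrative
+// placeholders:
+//
+//	allPages, err := profiletypes.ListOps(serviceClient, "os.nova.server-1.0").AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	ops, err := profiletypes.ExtractOps(allPages)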
+func ExtractOps(r pagination.Page) (map[string]interface{}, error) { + var s struct { + Operations map[string]interface{} `json:"operations"` + } + err := (r.(OperationPage)).ExtractInto(&s) + return s.Operations, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/doc.go new file mode 100644 index 000000000000..7890e625432b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_profiletypes_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/fixtures.go new file mode 100644 index 000000000000..64a112fff6b5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/fixtures.go @@ -0,0 +1,296 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ProfileTypeRequestID = "req-7328d1b0-9945-456f-b2cd-5166b77d14a8" +const ListResponse = ` +{ + "profile_types": [ + { + "name": "os.nova.server-1.0", + "schema": { + "context": { + "description": "Customized security context for operating containers.", + "required": false, + "type": "Map", + "updatable": false + }, + "name": { + "description": "The name of the container.", + "required": false, + "type": "Map", + "updatable": false + } + } + }, + { + "name": "os.heat.stack-1.0", + "schema": { + "context": { + "default": {}, + "description": "A dictionary for specifying the customized context for stack operations", + "required": false, + "type": "Map", + "updatable": false + }, + "disable_rollback": { + "default": true, + "description": "A boolean specifying whether a stack operation can be rolled back.", + "required": false, + "type": "Boolean", + "updatable": true + }, + "environment": { + "default": {}, + "description": "A map that specifies the environment used for stack operations.", + "required": false, + "type": "Map", + "updatable": true + }, + "files": { + "default": {}, + "description": "Contents of files referenced by the template, if any.", + "required": false, + "type": "Map", + "updatable": true + } + }, + "support_status": { + "1.0": [ + { + "status": "SUPPORTED", + "since": "2016.04" + } + ] + } + } + ] +} +` + +const GetResponse1 = ` +{ + "profile_type": { + "name": "os.nova.server-1.0", + "schema": { + "context": { + "description": "Customized security context for operating containers.", + "required": false, + "type": "Map", + "updatable": false + }, + "name": { + "description": "The name of the container.", + "required": false, + "type": "Map", + "updatable": false + } + } + } +} +` + +const GetResponse15 = ` +{ + "profile_type": { + "name": "os.heat.stack-1.0", + "schema": { + "context": { + "default": {}, + "description": "A dictionary for specifying the customized context for stack operations", + "required": false, + "type": "Map", + "updatable": false + }, + "disable_rollback": { + "default": true, + "description": "A boolean specifying whether a stack operation can be rolled back.", + "required": false, + "type": "Boolean", + "updatable": true + }, + "environment": 
{ + "default": {}, + "description": "A map that specifies the environment used for stack operations.", + "required": false, + "type": "Map", + "updatable": true + }, + "files": { + "default": {}, + "description": "Contents of files referenced by the template, if any.", + "required": false, + "type": "Map", + "updatable": true + } + }, + "support_status": { + "1.0": [ + { + "status": "SUPPORTED", + "since": "2016.04" + } + ] + } + } +} +` + +var ExpectedProfileType1 = profiletypes.ProfileType{ + Name: "os.nova.server-1.0", + Schema: map[string]profiletypes.Schema{ + "context": { + "description": "Customized security context for operating containers.", + "required": false, + "type": "Map", + "updatable": false, + }, + "name": { + "description": "The name of the container.", + "required": false, + "type": "Map", + "updatable": false, + }, + }, +} + +var ExpectedProfileType15 = profiletypes.ProfileType{ + Name: "os.heat.stack-1.0", + Schema: map[string]profiletypes.Schema{ + "context": { + "default": map[string]interface{}{}, + "description": "A dictionary for specifying the customized context for stack operations", + "required": false, + "type": "Map", + "updatable": false, + }, + "disable_rollback": { + "default": true, + "description": "A boolean specifying whether a stack operation can be rolled back.", + "required": false, + "type": "Boolean", + "updatable": true, + }, + "environment": { + "default": map[string]interface{}{}, + "description": "A map that specifies the environment used for stack operations.", + "required": false, + "type": "Map", + "updatable": true, + }, + "files": { + "default": map[string]interface{}{}, + "description": "Contents of files referenced by the template, if any.", + "required": false, + "type": "Map", + "updatable": true, + }, + }, + SupportStatus: map[string][]profiletypes.SupportStatus{ + "1.0": { + { + "status": "SUPPORTED", + "since": "2016.04", + }, + }, + }, +} + +var ExpectedProfileTypes = []profiletypes.ProfileType{ExpectedProfileType1, ExpectedProfileType15} + +func HandleList1Successfully(t *testing.T) { + th.Mux.HandleFunc("/v1/profile-types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListResponse) + }) +} + +func HandleGet1Successfully(t *testing.T, id string) { + th.Mux.HandleFunc("/v1/profile-types/"+id, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-ID", ProfileTypeRequestID) + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse1) + }) +} + +func HandleGet15Successfully(t *testing.T, id string) { + th.Mux.HandleFunc("/v1/profile-types/"+id, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-ID", ProfileTypeRequestID) + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse15) + }) +} + +const ProfileTypeName = "os.nova.server-1.0" + +const ListOpsResponse = ` +{ + "operations": { + "pause": { + "description": "Pause the server from running.", + "parameter": null + }, + "change_password": { + "description": "Change the administrator password.", + "parameters": { + "admin_pass": { + "description": "New 
password for the administrator.", + "required": false, + "type": "String" + } + } + } + } +} +` + +var ExpectedOps = map[string]interface{}{ + "change_password": map[string]interface{}{ + "description": "Change the administrator password.", + "parameters": map[string]interface{}{ + "admin_pass": map[string]interface{}{ + "description": "New password for the administrator.", + "required": false, + "type": "String", + }, + }, + }, + "pause": map[string]interface{}{ + "description": "Pause the server from running.", + "parameter": nil, + }, +} + +func HandleListOpsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/profile-types/"+ProfileTypeName+"/ops", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-ID", ProfileTypeRequestID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOpsResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/requests_test.go new file mode 100644 index 000000000000..c4f379b64544 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/testing/requests_test.go @@ -0,0 +1,74 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/acceptance/tools" + "github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListProfileTypes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleList1Successfully(t) + + pageCount := 0 + err := profiletypes.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pageCount++ + actual, err := profiletypes.ExtractProfileTypes(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedProfileTypes, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + + if pageCount != 1 { + t.Errorf("Expected 1 page, got %d", pageCount) + } +} + +func TestGetProfileType10(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGet1Successfully(t, ExpectedProfileType1.Name) + + actual, err := profiletypes.Get(fake.ServiceClient(), ExpectedProfileType1.Name).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedProfileType1, *actual) +} + +func TestGetProfileType15(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGet15Successfully(t, ExpectedProfileType15.Name) + + actual, err := profiletypes.Get(fake.ServiceClient(), ExpectedProfileType15.Name).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedProfileType15, *actual) +} + +func TestListProfileTypesOps(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListOpsSuccessfully(t) + + allPages, err := profiletypes.ListOps(fake.ServiceClient(), ProfileTypeName).AllPages() + th.AssertNoErr(t, err) + + allPolicyTypes, err := profiletypes.ExtractOps(allPages) + th.AssertNoErr(t, err) + + for k, v := range allPolicyTypes { + tools.PrintResource(t, k) + tools.PrintResource(t, v) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/urls.go new file mode 
100644 index 000000000000..cec8bfe46a3a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/profiletypes/urls.go @@ -0,0 +1,28 @@ +package profiletypes + +import "github.com/gophercloud/gophercloud" + +const ( + apiVersion = "v1" + apiName = "profile-types" +) + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func profileTypeURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return profileTypeURL(client, id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func listOpsURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id, "ops") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/doc.go new file mode 100644 index 000000000000..0217136a6254 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/doc.go @@ -0,0 +1,72 @@ +/* +Package receivers provides information and interaction with the receivers through +the OpenStack Clustering service. + +Example to Create a Receiver + + createOpts := receivers.CreateOpts{ + Action: "CLUSTER_DEL_NODES", + ClusterID: "b7b870ee-d3c5-4a93-b9d7-846c53b2c2dc", + Name: "test_receiver", + Type: receivers.WebhookReceiver, + } + + receiver, err := receivers.Create(serviceClient, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%v\n", receiver) + +Example to Get a Receiver + + receiver, err := receivers.Get(serviceClient, "receiver-name").Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%v\n", receiver) + +Example to Delete receiver + + receiverID := "6dc6d336e3fc4c0a951b5698cd1236ee" + err := receivers.Delete(serviceClient, receiverID).ExtractErr() + if err != nil { + panic(err) + } + + fmt.Printf("%v\n", receiver) + +Example to Update Receiver + + updateOpts := receivers.UpdateOpts{ + Name: "new-name", + } + + receiverID := "6dc6d336e3fc4c0a951b5698cd1236ee" + receiver, err := receivers.Update(serviceClient, receiverID, updateOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%v\n", receiver) + +Example to List Receivers + + listOpts := receivers.ListOpts{ + Limit: 2, + } + + receivers.List(serviceClient, listOpts).EachPage(func(page pagination.Page) (bool, error) { + allReceivers, err := receivers.ExtractReceivers(page) + if err != nil { + panic(err) + } + + for _, receiver := range allReceivers { + fmt.Printf("%+v\n", receiver) + } + return true, nil + }) +*/ +package receivers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/requests.go new file mode 100644 index 000000000000..1c528c732d4a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/requests.go @@ -0,0 +1,145 @@ +package receivers + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ReceiverType represents a valid type of receiver +type ReceiverType string + +const ( + WebhookReceiver ReceiverType = "webhook" + MessageReceiver ReceiverType = "message" +) + +// CreateOptsBuilder allows extensions to add additional 
parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToReceiverCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents options used to create a receiver.
+type CreateOpts struct {
+	Name      string                 `json:"name" required:"true"`
+	ClusterID string                 `json:"cluster_id,omitempty"`
+	Type      ReceiverType           `json:"type" required:"true"`
+	Action    string                 `json:"action,omitempty"`
+	Actor     map[string]interface{} `json:"actor,omitempty"`
+	Params    map[string]interface{} `json:"params,omitempty"`
+}
+
+// ToReceiverCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToReceiverCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "receiver")
+}
+
+// Create requests the creation of a new receiver.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToReceiverCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToReceiverUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options used to update a receiver.
+type UpdateOpts struct {
+	Name   string                 `json:"name,omitempty"`
+	Action string                 `json:"action,omitempty"`
+	Params map[string]interface{} `json:"params,omitempty"`
+}
+
+// ToReceiverUpdateMap constructs a request body from UpdateOpts.
+func (opts UpdateOpts) ToReceiverUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "receiver")
+}
+
+// Update requests the update of a receiver.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToReceiverUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	var result *http.Response
+	result, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201},
+	})
+
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+	return
+}
+
+// Get retrieves details of a single receiver. Use Extract to convert its result into a Receiver.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	var result *http.Response
+	result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}})
+	if r.Err == nil {
+		r.Header = result.Header
+	}
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToReceiverListQuery() (string, error)
+}
+
+// ListOpts represents options used to list receivers.
+type ListOpts struct {
+	Limit         int    `q:"limit"`
+	Marker        string `q:"marker"`
+	Sort          string `q:"sort"`
+	GlobalProject *bool  `q:"global_project"`
+	Name          string `q:"name"`
+	Type          string `q:"type"`
+	ClusterID     string `q:"cluster_id"`
+	Action        string `q:"action"`
+	User          string `q:"user"`
+}
+
+// ToReceiverListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToReceiverListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List instructs OpenStack to provide a list of receivers.
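+//
+// A minimal usage sketch; serviceClient and the filter values are
+// illustrative placeholders:
+//
+//	listOpts := receivers.ListOpts{Limit: 2, Sort: "name:asc"}
+//	receivers.List(serviceClient, listOpts).EachPage(func(page pagination.Page) (bool, error) {
+//		allReceivers, err := receivers.ExtractReceivers(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, receiver := range allReceivers {
+//			fmt.Printf("%+v\n", receiver)
+//		}
+//		return true, nil
+//	})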
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToReceiverListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ReceiverPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete deletes the specified receiver ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/results.go new file mode 100644 index 000000000000..9299921586da --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/results.go @@ -0,0 +1,114 @@ +package receivers + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Receiver represent a detailed receiver +type Receiver struct { + Action string `json:"action"` + Actor map[string]interface{} `json:"actor"` + Channel map[string]interface{} `json:"channel"` + ClusterID string `json:"cluster_id"` + CreatedAt time.Time `json:"-"` + Domain string `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` + Params map[string]interface{} `json:"params"` + Project string `json:"project"` + Type string `json:"type"` + UpdatedAt time.Time `json:"-"` + User string `json:"user"` +} + +func (r *Receiver) UnmarshalJSON(b []byte) error { + type tmp Receiver + var s struct { + tmp + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Receiver(s.tmp) + + if s.CreatedAt != "" { + r.CreatedAt, err = time.Parse(time.RFC3339, s.CreatedAt) + if err != nil { + return err + } + } + + if s.UpdatedAt != "" { + r.UpdatedAt, err = time.Parse(time.RFC3339, s.UpdatedAt) + if err != nil { + return err + } + } + + return nil +} + +// commonResult is the response of a base result. +type commonResult struct { + gophercloud.Result +} + +// Extract interprets any commonResult-based result as a Receiver. +func (r commonResult) Extract() (*Receiver, error) { + var s struct { + Receiver *Receiver `json:"receiver"` + } + err := r.ExtractInto(&s) + return s.Receiver, err +} + +// CreateResult is the result of a Create operation. Call its Extract method +// to interpret it as a Receiver. +type CreateResult struct { + commonResult +} + +// GetResult is the result for of a Get operation. Call its Extract method +// to interpret it as a Receiver. +type GetResult struct { + commonResult +} + +// UpdateResult is the result of a Update operation. Call its Extract method +// to interpret it as a Receiver. +type UpdateResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ReceiverPage contains a single page of all nodes from a List operation. +type ReceiverPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a ReceiverPage contains any results. 
+func (page ReceiverPage) IsEmpty() (bool, error) { + receivers, err := ExtractReceivers(page) + return len(receivers) == 0, err +} + +// ExtractReceivers returns a slice of Receivers from the List operation. +func ExtractReceivers(r pagination.Page) ([]Receiver, error) { + var s struct { + Receivers []Receiver `json:"receivers"` + } + err := (r.(ReceiverPage)).ExtractInto(&s) + return s.Receivers, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/doc.go new file mode 100644 index 000000000000..c692a35a01a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_receivers_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/fixtures.go new file mode 100644 index 000000000000..498d162dd89a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/fixtures.go @@ -0,0 +1,229 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const CreateResponse = ` +{ + "receiver": { + "action": "CLUSTER_SCALE_OUT", + "actor": { + "trust_id": [ + "6dc6d336e3fc4c0a951b5698cd1236d9" + ] + }, + "channel": { + "alarm_url": "http://node1:8778/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=1&count=1" + }, + "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", + "created_at": "2015-11-04T05:21:41Z", + "domain": "Default", + "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", + "name": "cluster_inflate", + "params": { + "count": "1" + }, + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "type": "webhook", + "updated_at": "2016-11-04T05:21:41Z", + "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" + } +}` + +var ExpectedReceiver = receivers.Receiver{ + Action: "CLUSTER_SCALE_OUT", + Actor: map[string]interface{}{ + "trust_id": []string{ + "6dc6d336e3fc4c0a951b5698cd1236d9", + }, + }, + Channel: map[string]interface{}{ + "alarm_url": "http://node1:8778/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=1&count=1", + }, + ClusterID: "ae63a10b-4a90-452c-aef1-113a0b255ee3", + CreatedAt: time.Date(2015, 11, 4, 5, 21, 41, 0, time.UTC), + Domain: "Default", + ID: "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", + Name: "cluster_inflate", + Params: map[string]interface{}{ + "count": "1", + }, + Project: "6e18cc2bdbeb48a5b3cad2dc499f6804", + Type: "webhook", + UpdatedAt: time.Date(2016, 11, 4, 5, 21, 41, 0, time.UTC), + User: "b4ad2d6e18cc2b9c48049f6dbe8a5b3c", +} + +const GetResponse = ` +{ + "receiver": { + "action": "CLUSTER_SCALE_OUT", + "actor": { + "trust_id": [ + "6dc6d336e3fc4c0a951b5698cd1236d9" + ] + }, + "channel": { + "alarm_url": "http://node1:8778/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=1&count=1" + }, + "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", + "created_at": "2015-11-04T05:21:41Z", + "domain": "Default", + "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", + "name": "cluster_inflate", + "params": { + "count": "1" + }, + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "type": "webhook", + "updated_at": "2016-11-04T05:21:41Z", + "user": 
"b4ad2d6e18cc2b9c48049f6dbe8a5b3c" + } +}` + +const UpdateResponse = ` +{ + "receiver": { + "action": "CLUSTER_SCALE_OUT", + "actor": { + "trust_id": [ + "6dc6d336e3fc4c0a951b5698cd1236d9" + ] + }, + "channel": { + "alarm_url": "http://node1:8778/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=1&count=1" + }, + "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", + "created_at": "2015-06-27T05:09:43Z", + "domain": "Default", + "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", + "name": "cluster_inflate", + "params": { + "count": "1" + }, + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "type": "webhook", + "updated_at": null, + "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" + } +}` + +var ExpectedUpdateReceiver = receivers.Receiver{ + Action: "CLUSTER_SCALE_OUT", + Actor: map[string]interface{}{ + "trust_id": []string{ + "6dc6d336e3fc4c0a951b5698cd1236d9", + }, + }, + Channel: map[string]interface{}{ + "alarm_url": "http://node1:8778/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=1&count=1", + }, + ClusterID: "ae63a10b-4a90-452c-aef1-113a0b255ee3", + CreatedAt: time.Date(2015, 6, 27, 5, 9, 43, 0, time.UTC), + Domain: "Default", + ID: "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", + Name: "cluster_inflate", + Params: map[string]interface{}{ + "count": "1", + }, + Project: "6e18cc2bdbeb48a5b3cad2dc499f6804", + Type: "webhook", + User: "b4ad2d6e18cc2b9c48049f6dbe8a5b3c", +} + +const ListResponse = ` +{ + "receivers": [ + { + "action": "CLUSTER_SCALE_OUT", + "actor": { + "trust_id": [ + "6dc6d336e3fc4c0a951b5698cd1236d9" + ] + }, + "channel": { + "alarm_url": "http://node1:8778/v1/webhooks/e03dd2e5-8f2e-4ec1-8c6a-74ba891e5422/trigger?V=1&count=1" + }, + "cluster_id": "ae63a10b-4a90-452c-aef1-113a0b255ee3", + "created_at": "2015-06-27T05:09:43Z", + "domain": "Default", + "id": "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", + "name": "cluster_inflate", + "params": { + "count": "1" + }, + "project": "6e18cc2bdbeb48a5b3cad2dc499f6804", + "type": "webhook", + "updated_at": null, + "user": "b4ad2d6e18cc2b9c48049f6dbe8a5b3c" + } + ] +}` + +var ExpectedReceiversList = []receivers.Receiver{ExpectedUpdateReceiver} + +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/receivers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, CreateResponse) + }) +} + +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/receivers/573aa1ba-bf45-49fd-907d-6b5d6e6adfd3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, GetResponse) + }) +} + +func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/receivers/6dc6d336e3fc4c0a951b5698cd1236ee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, UpdateResponse) + }) +} + +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/receivers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestFormValues(t, r, map[string]string{"limit": "2", "sort": "name:asc,status:desc"}) + 
w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ListResponse) + }) +} + +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/receivers/6dc6d336e3fc4c0a951b5698cd1236ee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/requests_test.go new file mode 100644 index 000000000000..02f8f0692d48 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/testing/requests_test.go @@ -0,0 +1,94 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateReceiver(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateSuccessfully(t) + + opts := receivers.CreateOpts{ + Name: "cluster_inflate", + ClusterID: "ae63a10b-4a90-452c-aef1-113a0b255ee3", + Type: receivers.WebhookReceiver, + Action: "CLUSTER_SCALE_OUT", + Actor: map[string]interface{}{}, + Params: map[string]interface{}{}, + } + + actual, err := receivers.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedReceiver, *actual) +} + +func TestGetReceivers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetSuccessfully(t) + + actual, err := receivers.Get(fake.ServiceClient(), "573aa1ba-bf45-49fd-907d-6b5d6e6adfd3").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedReceiver, *actual) +} + +func TestUpdateReceiver(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateSuccessfully(t) + + opts := receivers.UpdateOpts{ + Name: "cluster_inflate", + Action: "CLUSTER_SCALE_OUT", + Params: map[string]interface{}{ + "count": "2", + }, + } + actual, err := receivers.Update(fake.ServiceClient(), "6dc6d336e3fc4c0a951b5698cd1236ee", opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedUpdateReceiver, *actual) +} + +func TestListReceivers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListSuccessfully(t) + + opts := receivers.ListOpts{ + Limit: 2, + Sort: "name:asc,status:desc", + } + + count := 0 + receivers.List(fake.ServiceClient(), opts).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := receivers.ExtractReceivers(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedReceiversList, actual) + + return true, nil + }) + + th.AssertEquals(t, count, 1) +} + +func TestDeleteReceiver(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteSuccessfully(t) + + deleteResult := receivers.Delete(fake.ServiceClient(), "6dc6d336e3fc4c0a951b5698cd1236ee") + th.AssertNoErr(t, deleteResult.ExtractErr()) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/urls.go new file mode 100644 index 000000000000..be97ded53397 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/receivers/urls.go @@ -0,0 +1,34 @@ +package receivers + +import "github.com/gophercloud/gophercloud" + +var apiVersion = "v1" +var apiName = "receivers" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiVersion, apiName, id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/doc.go new file mode 100644 index 000000000000..c76dc11ffba5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/doc.go @@ -0,0 +1,15 @@ +/* +Package webhooks provides the ability to trigger an action represented by a webhook from the OpenStack Clustering +Service. + +Example to Trigger webhook action + + result, err := webhooks.Trigger(serviceClient(), "f93f83f6-762b-41b6-b757-80507834d394", webhooks.TriggerOpts{V: "1"}).Extract() + if err != nil { + panic(err) + } + + fmt.Println("result", result) + +*/ +package webhooks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/requests.go new file mode 100644 index 000000000000..1b211b4447c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/requests.go @@ -0,0 +1,53 @@ +package webhooks + +import ( + "fmt" + "net/url" + + "github.com/gophercloud/gophercloud" +) + +// TriggerOpts represents options used for triggering an action +type TriggerOpts struct { + V string `q:"V" required:"true"` + Params map[string]string +} + +// TriggerOptsBuilder Query string builder interface for webhooks +type TriggerOptsBuilder interface { + ToWebhookTriggerQuery() (string, error) +} + +// Query string builder for webhooks +func (opts TriggerOpts) ToWebhookTriggerQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + params := q.Query() + + for k, v := range opts.Params { + params.Add(k, v) + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// Trigger an action represented by a webhook. 
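+//
+// A minimal usage sketch, following the example in this package's doc.go; the
+// receiver ID is an illustrative placeholder, and V must be set because the
+// function below rejects a nil options builder:
+//
+//	actionID, err := webhooks.Trigger(serviceClient, "f93f83f6-762b-41b6-b757-80507834d394", webhooks.TriggerOpts{V: "1"}).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println("triggered action:", actionID)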
+func Trigger(client *gophercloud.ServiceClient, id string, opts TriggerOptsBuilder) (r TriggerResult) { + url := triggerURL(client, id) + if opts != nil { + query, err := opts.ToWebhookTriggerQuery() + if err != nil { + r.Err = err + return + } + url += query + } else { + r.Err = fmt.Errorf("Must contain V for TriggerOpt") + return + } + + _, r.Err = client.Post(url, nil, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/results.go new file mode 100644 index 000000000000..ccb06086a2f8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/results.go @@ -0,0 +1,22 @@ +package webhooks + +import ( + "github.com/gophercloud/gophercloud" +) + +type commonResult struct { + gophercloud.Result +} + +type TriggerResult struct { + commonResult +} + +// Extract retrieves the response action +func (r commonResult) Extract() (string, error) { + var s struct { + Action string `json:"action"` + } + err := r.ExtractInto(&s) + return s.Action, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/testing/doc.go new file mode 100644 index 000000000000..9a759ee29a5e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/testing/doc.go @@ -0,0 +1,2 @@ +// clustering_webhooks_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/testing/requests_test.go new file mode 100644 index 000000000000..9d0da6f33512 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/testing/requests_test.go @@ -0,0 +1,136 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "encoding/json" + + "github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestWebhookTrigger(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "action": "290c44fa-c60f-4d75-a0eb-87433ba982a3" + }`) + }) + + triggerOpts := webhooks.TriggerOpts{ + V: "1", + Params: map[string]string{ + "foo": "bar", + "bar": "baz", + }, + } + result, err := webhooks.Trigger(fake.ServiceClient(), "f93f83f6-762b-41b6-b757-80507834d394", triggerOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, result, "290c44fa-c60f-4d75-a0eb-87433ba982a3") +} + +// Test webhook with params that generates query strings +func TestWebhookParams(t *testing.T) { + triggerOpts := webhooks.TriggerOpts{ + V: "1", + Params: map[string]string{ + "foo": "bar", + "bar": "baz", + }, + } + expected := "?V=1&bar=baz&foo=bar" + actual, err := triggerOpts.ToWebhookTriggerQuery() + th.AssertNoErr(t, err) + th.AssertEquals(t, actual, expected) +} + +// Nagative test case for returning invalid type (integer) for action id +func TestWebhooksInvalidAction(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "action": 123 + }`) + }) + + triggerOpts := webhooks.TriggerOpts{ + V: "1", + Params: map[string]string{ + "foo": "bar", + "bar": "baz", + }, + } + _, err := webhooks.Trigger(fake.ServiceClient(), "f93f83f6-762b-41b6-b757-80507834d394", triggerOpts).Extract() + isValid := err.(*json.UnmarshalTypeError) == nil + th.AssertEquals(t, false, isValid) +} + +// Negative test case for passing empty TriggerOpt +func TestWebhookTriggerInvalidEmptyOpt(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "action": "290c44fa-c60f-4d75-a0eb-87433ba982a3" + }`) + }) + + _, err := webhooks.Trigger(fake.ServiceClient(), "f93f83f6-762b-41b6-b757-80507834d394", webhooks.TriggerOpts{}).Extract() + if err == nil { + t.Errorf("Expected error without V param") + } +} + +// Negative test case for passing in nil for TriggerOpt +func TestWebhookTriggerInvalidNilOpt(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "action": "290c44fa-c60f-4d75-a0eb-87433ba982a3" + }`) + }) + + _, err := webhooks.Trigger(fake.ServiceClient(), "f93f83f6-762b-41b6-b757-80507834d394", nil).Extract() + + if err == nil { + t.Errorf("Expected error with nil param") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/urls.go new file mode 100644 index 000000000000..563cf81122d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/clustering/v1/webhooks/urls.go @@ -0,0 +1,7 @@ +package webhooks + +import "github.com/gophercloud/gophercloud" + +func triggerURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("v1", "webhooks", id, "trigger") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go new file mode 100644 index 000000000000..5510f2653952 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/doc.go @@ -0,0 +1,52 @@ +/* +Package extensions provides information and interaction with the different +extensions available for an OpenStack service. + +The purpose of OpenStack API extensions is to: + +- Introduce new features in the API without requiring a version change. +- Introduce vendor-specific niche functionality. +- Act as a proving ground for experimental functionalities that might be +included in a future version of the API. + +Extensions usually have tags that prevent conflicts with other extensions that +define attributes or resources with the same names, and with core resources and +attributes. 
Because an extension might not be supported by all plug-ins, its
+availability varies with deployments and the specific plug-in.
+
+The results of this package vary depending on the type of Service Client used.
+In the following examples, note how the only difference is the creation of the
+Service Client.
+
+Example of Retrieving Compute Extensions
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	computeClient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+
+	allPages, err := extensions.List(computeClient).AllPages()
+	allExtensions, err := extensions.ExtractExtensions(allPages)
+
+	for _, extension := range allExtensions {
+		fmt.Printf("%+v\n", extension)
+	}
+
+
+Example of Retrieving Network Extensions
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	networkClient, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+
+	allPages, err := extensions.List(networkClient).AllPages()
+	allExtensions, err := extensions.ExtractExtensions(allPages)
+
+	for _, extension := range allExtensions {
+		fmt.Printf("%+v\n", extension)
+	}
+*/
+package extensions
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go
new file mode 100755
index 000000000000..46b7d60cd694
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/requests.go
@@ -0,0 +1,20 @@
+package extensions
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Get retrieves information for a specific extension using its alias.
+func Get(c *gophercloud.ServiceClient, alias string) (r GetResult) {
+	_, r.Err = c.Get(ExtensionURL(c, alias), &r.Body, nil)
+	return
+}
+
+// List returns a Pager which allows you to iterate over the full collection of extensions.
+// It does not accept query parameters.
+func List(c *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(c, ListExtensionURL(c), func(r pagination.PageResult) pagination.Page {
+		return ExtensionPage{pagination.SinglePageBase(r)}
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go
new file mode 100644
index 000000000000..8a26edd1c598
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/results.go
@@ -0,0 +1,53 @@
+package extensions
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// GetResult temporarily stores the result of a Get call.
+// Use its Extract() method to interpret it as an Extension.
+type GetResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets a GetResult as an Extension.
+func (r GetResult) Extract() (*Extension, error) {
+	var s struct {
+		Extension *Extension `json:"extension"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Extension, err
+}
+
+// Extension is a struct that represents an OpenStack extension.
+type Extension struct { + Updated string `json:"updated"` + Name string `json:"name"` + Links []interface{} `json:"links"` + Namespace string `json:"namespace"` + Alias string `json:"alias"` + Description string `json:"description"` +} + +// ExtensionPage is the page returned by a pager when traversing over a collection of extensions. +type ExtensionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an ExtensionPage struct is empty. +func (r ExtensionPage) IsEmpty() (bool, error) { + is, err := ExtractExtensions(r) + return len(is) == 0, err +} + +// ExtractExtensions accepts a Page struct, specifically an ExtensionPage +// struct, and extracts the elements into a slice of Extension structs. +// In other words, a generic collection is mapped into a relevant slice. +func ExtractExtensions(r pagination.Page) ([]Extension, error) { + var s struct { + Extensions []Extension `json:"extensions"` + } + err := (r.(ExtensionPage)).ExtractInto(&s) + return s.Extensions, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/doc.go new file mode 100644 index 000000000000..8d076f30a082 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/doc.go @@ -0,0 +1,2 @@ +// common extensions unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/fixtures.go new file mode 100644 index 000000000000..a986c950a26d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/fixtures.go @@ -0,0 +1,90 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/common/extensions" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Extension results. +const ListOutput = ` +{ + "extensions": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] +}` + +// GetOutput provides a single Extension result. +const GetOutput = ` +{ + "extension": { + "updated": "2013-02-03T10:00:00-00:00", + "name": "agent", + "links": [], + "namespace": "http://docs.openstack.org/ext/agent/api/v2.0", + "alias": "agent", + "description": "The agent management extension." + } +} +` + +// ListedExtension is the Extension that should be parsed from ListOutput. +var ListedExtension = extensions.Extension{ + Updated: "2013-01-20T00:00:00-00:00", + Name: "Neutron Service Type Management", + Links: []interface{}{}, + Namespace: "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + Alias: "service-type", + Description: "API for retrieving service providers for Neutron advanced services", +} + +// ExpectedExtensions is a slice containing the Extension that should be parsed from ListOutput. +var ExpectedExtensions = []extensions.Extension{ListedExtension} + +// SingleExtension is the Extension that should be parsed from GetOutput. 
+var SingleExtension = &extensions.Extension{ + Updated: "2013-02-03T10:00:00-00:00", + Name: "agent", + Links: []interface{}{}, + Namespace: "http://docs.openstack.org/ext/agent/api/v2.0", + Alias: "agent", + Description: "The agent management extension.", +} + +// HandleListExtensionsSuccessfully creates an HTTP handler at `/extensions` on the test handler +// mux that response with a list containing a single tenant. +func HandleListExtensionsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetExtensionSuccessfully creates an HTTP handler at `/extensions/agent` that responds with +// a JSON payload corresponding to SingleExtension. +func HandleGetExtensionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/extensions/agent", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/requests_test.go new file mode 100644 index 000000000000..fbaedfa0bed6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/testing/requests_test.go @@ -0,0 +1,39 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/common/extensions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListExtensionsSuccessfully(t) + + count := 0 + + extensions.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := extensions.ExtractExtensions(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedExtensions, actual) + + return true, nil + }) + + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetExtensionSuccessfully(t) + + actual, err := extensions.Get(client.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SingleExtension, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go new file mode 100644 index 000000000000..eaf38b2d1911 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/common/extensions/urls.go @@ -0,0 +1,13 @@ +package extensions + +import "github.com/gophercloud/gophercloud" + +// ExtensionURL generates the URL for an extension resource by name. +func ExtensionURL(c *gophercloud.ServiceClient, name string) string { + return c.ServiceURL("extensions", name) +} + +// ListExtensionURL generates the URL for the extensions resource collection. 
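+// For example (illustrative endpoint), a client whose endpoint is
+// https://compute.example.com/v2.1/ would produce
+// https://compute.example.com/v2.1/extensions.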
+func ListExtensionURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("extensions")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/doc.go
new file mode 100644
index 000000000000..97f1b033da10
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/doc.go
@@ -0,0 +1,105 @@
+/*
+Package aggregates manages information about the host aggregates in the
+OpenStack cloud.
+
+Example of Create Aggregate
+
+	opts := aggregates.CreateOpts{
+		Name:             "name",
+		AvailabilityZone: "london",
+	}
+
+	aggregate, err := aggregates.Create(computeClient, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", aggregate)
+
+Example of Show Aggregate Details
+
+	aggregateID := 42
+	aggregate, err := aggregates.Get(computeClient, aggregateID).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", aggregate)
+
+Example of Delete Aggregate
+
+	aggregateID := 32
+	err := aggregates.Delete(computeClient, aggregateID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example of Update Aggregate
+
+	aggregateID := 42
+	opts := aggregates.UpdateOpts{
+		Name:             "new_name",
+		AvailabilityZone: "nova2",
+	}
+
+	aggregate, err := aggregates.Update(computeClient, aggregateID, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", aggregate)
+
+Example of Retrieving list of all aggregates
+
+	allPages, err := aggregates.List(computeClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allAggregates, err := aggregates.ExtractAggregates(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, aggregate := range allAggregates {
+		fmt.Printf("%+v\n", aggregate)
+	}
+
+Example of Add Host
+
+	aggregateID := 22
+	opts := aggregates.AddHostOpts{
+		Host: "newhost-cmp1",
+	}
+
+	aggregate, err := aggregates.AddHost(computeClient, aggregateID, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", aggregate)
+
+Example of Remove Host
+
+	aggregateID := 22
+	opts := aggregates.RemoveHostOpts{
+		Host: "newhost-cmp1",
+	}
+
+	aggregate, err := aggregates.RemoveHost(computeClient, aggregateID, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", aggregate)
+
+Example of Create or Update Metadata
+
+	aggregateID := 22
+	opts := aggregates.SetMetadataOpts{
+		Metadata: map[string]interface{}{"key": "value"},
+	}
+
+	aggregate, err := aggregates.SetMetadata(computeClient, aggregateID, opts).Extract()
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("%+v\n", aggregate)
+
+*/
+package aggregates
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/requests.go
new file mode 100644
index 000000000000..c37531c56a50
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/requests.go
@@ -0,0 +1,162 @@
+package aggregates
+
+import (
+	"strconv"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List makes a request against the API to list aggregates.
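+//
+// A short sketch of consuming the returned Pager page by page (assuming an
+// authenticated compute ServiceClient named computeClient; AllPages, as shown
+// in the package documentation above, works as well):
+//
+//	err := List(computeClient).EachPage(func(page pagination.Page) (bool, error) {
+//		aggs, err := ExtractAggregates(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		fmt.Printf("%+v\n", aggs)
+//		return true, nil
+//	})
+//	if err != nil {
+//		// handle the paging error
+//	}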
+func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, aggregatesListURL(client), func(r pagination.PageResult) pagination.Page { + return AggregatesPage{pagination.SinglePageBase(r)} + }) +} + +type CreateOpts struct { + // The name of the host aggregate. + Name string `json:"name" required:"true"` + + // The availability zone of the host aggregate. + // You should use a custom availability zone rather than + // the default returned by the os-availability-zone API. + // The availability zone must not include ‘:’ in its name. + AvailabilityZone string `json:"availability_zone,omitempty"` +} + +func (opts CreateOpts) ToAggregatesCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "aggregate") +} + +// Create makes a request against the API to create an aggregate. +func Create(client *gophercloud.ServiceClient, opts CreateOpts) (r CreateResult) { + b, err := opts.ToAggregatesCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(aggregatesCreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete makes a request against the API to delete an aggregate. +func Delete(client *gophercloud.ServiceClient, aggregateID int) (r DeleteResult) { + v := strconv.Itoa(aggregateID) + _, r.Err = client.Delete(aggregatesDeleteURL(client, v), &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get makes a request against the API to get details for a specific aggregate. +func Get(client *gophercloud.ServiceClient, aggregateID int) (r GetResult) { + v := strconv.Itoa(aggregateID) + _, r.Err = client.Get(aggregatesGetURL(client, v), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type UpdateOpts struct { + // The name of the host aggregate. + Name string `json:"name,omitempty"` + + // The availability zone of the host aggregate. + // You should use a custom availability zone rather than + // the default returned by the os-availability-zone API. + // The availability zone must not include ‘:’ in its name. + AvailabilityZone string `json:"availability_zone,omitempty"` +} + +func (opts UpdateOpts) ToAggregatesUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "aggregate") +} + +// Update makes a request against the API to update a specific aggregate. +func Update(client *gophercloud.ServiceClient, aggregateID int, opts UpdateOpts) (r UpdateResult) { + v := strconv.Itoa(aggregateID) + + b, err := opts.ToAggregatesUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(aggregatesUpdateURL(client, v), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type AddHostOpts struct { + // The name of the host. + Host string `json:"host" required:"true"` +} + +func (opts AddHostOpts) ToAggregatesAddHostMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "add_host") +} + +// AddHost makes a request against the API to add host to a specific aggregate. +func AddHost(client *gophercloud.ServiceClient, aggregateID int, opts AddHostOpts) (r ActionResult) { + v := strconv.Itoa(aggregateID) + + b, err := opts.ToAggregatesAddHostMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(aggregatesAddHostURL(client, v), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type RemoveHostOpts struct { + // The name of the host. 
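+	// For example, a compute host name such as "cmp0" or "cmp1", as used in
+	// this package's tests (illustrative values).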
+ Host string `json:"host" required:"true"` +} + +func (opts RemoveHostOpts) ToAggregatesRemoveHostMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "remove_host") +} + +// RemoveHost makes a request against the API to remove host from a specific aggregate. +func RemoveHost(client *gophercloud.ServiceClient, aggregateID int, opts RemoveHostOpts) (r ActionResult) { + v := strconv.Itoa(aggregateID) + + b, err := opts.ToAggregatesRemoveHostMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(aggregatesRemoveHostURL(client, v), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type SetMetadataOpts struct { + Metadata map[string]interface{} `json:"metadata" required:"true"` +} + +func (opts SetMetadataOpts) ToSetMetadataMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "set_metadata") +} + +// SetMetadata makes a request against the API to set metadata to a specific aggregate. +func SetMetadata(client *gophercloud.ServiceClient, aggregateID int, opts SetMetadataOpts) (r ActionResult) { + v := strconv.Itoa(aggregateID) + + b, err := opts.ToSetMetadataMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(aggregatesSetMetadataURL(client, v), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/results.go new file mode 100644 index 000000000000..2ab0cf22f07f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/results.go @@ -0,0 +1,117 @@ +package aggregates + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Aggregate represents a host aggregate in the OpenStack cloud. +type Aggregate struct { + // The availability zone of the host aggregate. + AvailabilityZone string `json:"availability_zone"` + + // A list of host ids in this aggregate. + Hosts []string `json:"hosts"` + + // The ID of the host aggregate. + ID int `json:"id"` + + // Metadata key and value pairs associate with the aggregate. + Metadata map[string]string `json:"metadata"` + + // Name of the aggregate. + Name string `json:"name"` + + // The date and time when the resource was created. + CreatedAt time.Time `json:"-"` + + // The date and time when the resource was updated, + // if the resource has not been updated, this field will show as null. + UpdatedAt time.Time `json:"-"` + + // The date and time when the resource was deleted, + // if the resource has not been deleted yet, this field will be null. + DeletedAt time.Time `json:"-"` + + // A boolean indicates whether this aggregate is deleted or not, + // if it has not been deleted, false will appear. 
+ Deleted bool `json:"deleted"` +} + +// UnmarshalJSON to override default +func (r *Aggregate) UnmarshalJSON(b []byte) error { + type tmp Aggregate + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Aggregate(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.DeletedAt = time.Time(s.DeletedAt) + + return nil +} + +// AggregatesPage represents a single page of all Aggregates from a List +// request. +type AggregatesPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Aggregates contains any results. +func (page AggregatesPage) IsEmpty() (bool, error) { + aggregates, err := ExtractAggregates(page) + return len(aggregates) == 0, err +} + +// ExtractAggregates interprets a page of results as a slice of Aggregates. +func ExtractAggregates(p pagination.Page) ([]Aggregate, error) { + var a struct { + Aggregates []Aggregate `json:"aggregates"` + } + err := (p.(AggregatesPage)).ExtractInto(&a) + return a.Aggregates, err +} + +type aggregatesResult struct { + gophercloud.Result +} + +func (r aggregatesResult) Extract() (*Aggregate, error) { + var s struct { + Aggregate *Aggregate `json:"aggregate"` + } + err := r.ExtractInto(&s) + return s.Aggregate, err +} + +type CreateResult struct { + aggregatesResult +} + +type GetResult struct { + aggregatesResult +} + +type DeleteResult struct { + gophercloud.ErrResult +} + +type UpdateResult struct { + aggregatesResult +} + +type ActionResult struct { + aggregatesResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/testing/fixtures.go new file mode 100644 index 000000000000..9ae71d2230ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/testing/fixtures.go @@ -0,0 +1,344 @@ +package testing + +import ( + "fmt" + "net/http" + "strconv" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// AggregateListBody is sample response to the List call +const AggregateListBody = ` +{ + "aggregates": [ + { + "name": "test-aggregate1", + "availability_zone": null, + "deleted": false, + "created_at": "2017-12-22T10:12:06.000000", + "updated_at": null, + "hosts": [], + "deleted_at": null, + "id": 1, + "metadata": {} + }, + { + "name": "test-aggregate2", + "availability_zone": "test-az", + "deleted": false, + "created_at": "2017-12-22T10:16:07.000000", + "updated_at": null, + "hosts": [ + "cmp0" + ], + "deleted_at": null, + "id": 4, + "metadata": { + "availability_zone": "test-az" + } + } + ] +} +` + +const AggregateCreateBody = ` +{ + "aggregate": { + "availability_zone": "london", + "created_at": "2016-12-27T22:51:32.000000", + "deleted": false, + "deleted_at": null, + "id": 32, + "name": "name", + "updated_at": null + } +} +` + +const AggregateGetBody = ` +{ + "aggregate": { + "name": "test-aggregate2", + "availability_zone": "test-az", + "deleted": false, + "created_at": "2017-12-22T10:16:07.000000", + "updated_at": null, + "hosts": [ + "cmp0" + ], + "deleted_at": null, + 
"id": 4, + "metadata": { + "availability_zone": "test-az" + } + } +} +` + +const AggregateUpdateBody = ` +{ + "aggregate": { + "name": "test-aggregate2", + "availability_zone": "nova2", + "deleted": false, + "created_at": "2017-12-22T10:12:06.000000", + "updated_at": "2017-12-23T10:18:00.000000", + "hosts": [], + "deleted_at": null, + "id": 1, + "metadata": { + "availability_zone": "nova2" + } + } +} +` + +const AggregateAddHostBody = ` +{ + "aggregate": { + "name": "test-aggregate2", + "availability_zone": "test-az", + "deleted": false, + "created_at": "2017-12-22T10:16:07.000000", + "updated_at": null, + "hosts": [ + "cmp0", + "cmp1" + ], + "deleted_at": null, + "id": 4, + "metadata": { + "availability_zone": "test-az" + } + } +} +` + +const AggregateRemoveHostBody = ` +{ + "aggregate": { + "name": "test-aggregate2", + "availability_zone": "nova2", + "deleted": false, + "created_at": "2017-12-22T10:12:06.000000", + "updated_at": "2017-12-23T10:18:00.000000", + "hosts": [], + "deleted_at": null, + "id": 1, + "metadata": { + "availability_zone": "nova2" + } + } +} +` + +const AggregateSetMetadataBody = ` +{ + "aggregate": { + "name": "test-aggregate2", + "availability_zone": "test-az", + "deleted": false, + "created_at": "2017-12-22T10:16:07.000000", + "updated_at": "2017-12-23T10:18:00.000000", + "hosts": [ + "cmp0" + ], + "deleted_at": null, + "id": 4, + "metadata": { + "availability_zone": "test-az", + "key": "value" + } + } +} +` + +var ( + // First aggregate from the AggregateListBody + FirstFakeAggregate = aggregates.Aggregate{ + AvailabilityZone: "", + Hosts: []string{}, + ID: 1, + Metadata: map[string]string{}, + Name: "test-aggregate1", + CreatedAt: time.Date(2017, 12, 22, 10, 12, 6, 0, time.UTC), + UpdatedAt: time.Time{}, + DeletedAt: time.Time{}, + Deleted: false, + } + + // Second aggregate from the AggregateListBody + SecondFakeAggregate = aggregates.Aggregate{ + AvailabilityZone: "test-az", + Hosts: []string{"cmp0"}, + ID: 4, + Metadata: map[string]string{"availability_zone": "test-az"}, + Name: "test-aggregate2", + CreatedAt: time.Date(2017, 12, 22, 10, 16, 7, 0, time.UTC), + UpdatedAt: time.Time{}, + DeletedAt: time.Time{}, + Deleted: false, + } + + // Aggregate from the AggregateCreateBody + CreatedAggregate = aggregates.Aggregate{ + AvailabilityZone: "london", + Hosts: nil, + ID: 32, + Metadata: nil, + Name: "name", + CreatedAt: time.Date(2016, 12, 27, 22, 51, 32, 0, time.UTC), + UpdatedAt: time.Time{}, + DeletedAt: time.Time{}, + Deleted: false, + } + + // Aggregate ID to delete + AggregateIDtoDelete = 1 + + // Aggregate ID to get, from the AggregateGetBody + AggregateIDtoGet = SecondFakeAggregate.ID + + // Aggregate ID to update + AggregateIDtoUpdate = FirstFakeAggregate.ID + + // Updated aggregate + UpdatedAggregate = aggregates.Aggregate{ + AvailabilityZone: "nova2", + Hosts: []string{}, + ID: 1, + Metadata: map[string]string{"availability_zone": "nova2"}, + Name: "test-aggregate2", + CreatedAt: time.Date(2017, 12, 22, 10, 12, 6, 0, time.UTC), + UpdatedAt: time.Date(2017, 12, 23, 10, 18, 0, 0, time.UTC), + DeletedAt: time.Time{}, + Deleted: false, + } + + AggregateWithAddedHost = aggregates.Aggregate{ + AvailabilityZone: "test-az", + Hosts: []string{"cmp0", "cmp1"}, + ID: 4, + Metadata: map[string]string{"availability_zone": "test-az"}, + Name: "test-aggregate2", + CreatedAt: time.Date(2017, 12, 22, 10, 16, 7, 0, time.UTC), + UpdatedAt: time.Time{}, + DeletedAt: time.Time{}, + Deleted: false, + } + + AggregateWithRemovedHost = aggregates.Aggregate{ + AvailabilityZone: 
"nova2", + Hosts: []string{}, + ID: 1, + Metadata: map[string]string{"availability_zone": "nova2"}, + Name: "test-aggregate2", + CreatedAt: time.Date(2017, 12, 22, 10, 12, 6, 0, time.UTC), + UpdatedAt: time.Date(2017, 12, 23, 10, 18, 0, 0, time.UTC), + DeletedAt: time.Time{}, + Deleted: false, + } + + AggregateWithUpdatedMetadata = aggregates.Aggregate{ + AvailabilityZone: "test-az", + Hosts: []string{"cmp0"}, + ID: 4, + Metadata: map[string]string{"availability_zone": "test-az", "key": "value"}, + Name: "test-aggregate2", + CreatedAt: time.Date(2017, 12, 22, 10, 16, 7, 0, time.UTC), + UpdatedAt: time.Date(2017, 12, 23, 10, 18, 0, 0, time.UTC), + DeletedAt: time.Time{}, + Deleted: false, + } +) + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-aggregates", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, AggregateListBody) + }) +} + +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-aggregates", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, AggregateCreateBody) + }) +} + +func HandleDeleteSuccessfully(t *testing.T) { + v := strconv.Itoa(AggregateIDtoDelete) + th.Mux.HandleFunc("/os-aggregates/"+v, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusOK) + }) +} + +func HandleGetSuccessfully(t *testing.T) { + v := strconv.Itoa(AggregateIDtoGet) + th.Mux.HandleFunc("/os-aggregates/"+v, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, AggregateGetBody) + }) +} + +func HandleUpdateSuccessfully(t *testing.T) { + v := strconv.Itoa(AggregateIDtoUpdate) + th.Mux.HandleFunc("/os-aggregates/"+v, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, AggregateUpdateBody) + }) +} + +func HandleAddHostSuccessfully(t *testing.T) { + v := strconv.Itoa(AggregateWithAddedHost.ID) + th.Mux.HandleFunc("/os-aggregates/"+v+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, AggregateAddHostBody) + }) +} + +func HandleRemoveHostSuccessfully(t *testing.T) { + v := strconv.Itoa(AggregateWithRemovedHost.ID) + th.Mux.HandleFunc("/os-aggregates/"+v+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, AggregateRemoveHostBody) + }) +} + +func HandleSetMetadataSuccessfully(t *testing.T) { + v := strconv.Itoa(AggregateWithUpdatedMetadata.ID) + th.Mux.HandleFunc("/os-aggregates/"+v+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") 
+ fmt.Fprintf(w, AggregateSetMetadataBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/testing/requests_test.go new file mode 100644 index 000000000000..bfd18614ccae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/testing/requests_test.go @@ -0,0 +1,149 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListAggregates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + pages := 0 + err := aggregates.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := aggregates.ExtractAggregates(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 aggregates, got %d", len(actual)) + } + th.CheckDeepEquals(t, FirstFakeAggregate, actual[0]) + th.CheckDeepEquals(t, SecondFakeAggregate, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestCreateAggregates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + expected := CreatedAggregate + + opts := aggregates.CreateOpts{ + Name: "name", + AvailabilityZone: "london", + } + + actual, err := aggregates.Create(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func TestDeleteAggregates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := aggregates.Delete(client.ServiceClient(), AggregateIDtoDelete).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetAggregates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + expected := SecondFakeAggregate + + actual, err := aggregates.Get(client.ServiceClient(), AggregateIDtoGet).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func TestUpdateAggregate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + expected := UpdatedAggregate + + opts := aggregates.UpdateOpts{ + Name: "test-aggregates2", + AvailabilityZone: "nova2", + } + + actual, err := aggregates.Update(client.ServiceClient(), expected.ID, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func TestAddHostAggregate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAddHostSuccessfully(t) + + expected := AggregateWithAddedHost + + opts := aggregates.AddHostOpts{ + Host: "cmp1", + } + + actual, err := aggregates.AddHost(client.ServiceClient(), expected.ID, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func TestRemoveHostAggregate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRemoveHostSuccessfully(t) + + expected := AggregateWithRemovedHost + + opts := aggregates.RemoveHostOpts{ + Host: "cmp1", + } + + actual, err := aggregates.RemoveHost(client.ServiceClient(), expected.ID, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} + +func 
TestSetMetadataAggregate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleSetMetadataSuccessfully(t) + + expected := AggregateWithUpdatedMetadata + + opts := aggregates.SetMetadataOpts{ + Metadata: map[string]interface{}{"key": "value"}, + } + + actual, err := aggregates.SetMetadata(client.ServiceClient(), expected.ID, opts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/urls.go new file mode 100644 index 000000000000..bb30c7fc90b7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/aggregates/urls.go @@ -0,0 +1,35 @@ +package aggregates + +import "github.com/gophercloud/gophercloud" + +func aggregatesListURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-aggregates") +} + +func aggregatesCreateURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-aggregates") +} + +func aggregatesDeleteURL(c *gophercloud.ServiceClient, aggregateID string) string { + return c.ServiceURL("os-aggregates", aggregateID) +} + +func aggregatesGetURL(c *gophercloud.ServiceClient, aggregateID string) string { + return c.ServiceURL("os-aggregates", aggregateID) +} + +func aggregatesUpdateURL(c *gophercloud.ServiceClient, aggregateID string) string { + return c.ServiceURL("os-aggregates", aggregateID) +} + +func aggregatesAddHostURL(c *gophercloud.ServiceClient, aggregateID string) string { + return c.ServiceURL("os-aggregates", aggregateID, "action") +} + +func aggregatesRemoveHostURL(c *gophercloud.ServiceClient, aggregateID string) string { + return c.ServiceURL("os-aggregates", aggregateID, "action") +} + +func aggregatesSetMetadataURL(c *gophercloud.ServiceClient, aggregateID string) string { + return c.ServiceURL("os-aggregates", aggregateID, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go new file mode 100644 index 000000000000..3653122bf30f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/doc.go @@ -0,0 +1,52 @@ +/* +Package attachinterfaces provides the ability to retrieve and manage network +interfaces through Nova. 
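+
+The computeClient in the examples below is assumed to be an authenticated
+Compute v2 client; a minimal sketch of creating one (mirroring the pattern
+shown in the common/extensions package documentation):
+
+	ao, err := openstack.AuthOptionsFromEnv()
+	provider, err := openstack.AuthenticatedClient(ao)
+	computeClient, err := openstack.NewComputeV2(provider, gophercloud.EndpointOpts{
+		Region: os.Getenv("OS_REGION_NAME"),
+	})
+	if err != nil {
+		panic(err)
+	}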
+
+Example of Listing a Server's Interfaces
+
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	allPages, err := attachinterfaces.List(computeClient, serverID).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allInterfaces, err := attachinterfaces.ExtractInterfaces(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, iface := range allInterfaces {
+		fmt.Printf("%+v\n", iface)
+	}
+
+Example to Get a Server's Interface
+
+	portID := "0dde1598-b374-474e-986f-5b8dd1df1d4e"
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	iface, err := attachinterfaces.Get(computeClient, serverID, portID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a new Interface attachment on the Server
+
+	networkID := "8a5fe506-7e9f-4091-899b-96336909d93c"
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	attachOpts := attachinterfaces.CreateOpts{
+		NetworkID: networkID,
+	}
+	iface, err := attachinterfaces.Create(computeClient, serverID, attachOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Interface attachment from the Server
+
+	portID := "0dde1598-b374-474e-986f-5b8dd1df1d4e"
+	serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f"
+	err := attachinterfaces.Delete(computeClient, serverID, portID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package attachinterfaces
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go
new file mode 100644
index 000000000000..874f7a61ec4c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/requests.go
@@ -0,0 +1,72 @@
+package attachinterfaces
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// List makes a request against the nova API to list the server's interfaces.
+func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager {
+	return pagination.NewPager(client, listInterfaceURL(client, serverID), func(r pagination.PageResult) pagination.Page {
+		return InterfacePage{pagination.SinglePageBase(r)}
+	})
+}
+
+// Get requests details on a single interface attachment by the server and port IDs.
+func Get(client *gophercloud.ServiceClient, serverID, portID string) (r GetResult) {
+	_, r.Err = client.Get(getInterfaceURL(client, serverID, portID), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToAttachInterfacesCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies parameters of a new interface attachment.
+type CreateOpts struct {
+	// PortID is the ID of the port for which you want to create an interface.
+	// The NetworkID and PortID parameters are mutually exclusive.
+	// If you do not specify the PortID parameter, the OpenStack Networking API
+	// v2.0 allocates a port and creates an interface for it on the network.
+	PortID string `json:"port_id,omitempty"`
+
+	// NetworkID is the ID of the network for which you want to create an interface.
+	// The NetworkID and PortID parameters are mutually exclusive.
+	// If you do not specify the NetworkID parameter, the OpenStack Networking
+	// API v2.0 uses the network information cache that is associated with the instance.
+ NetworkID string `json:"net_id,omitempty"` + + // Slice of FixedIPs. If you request a specific FixedIP address without a + // NetworkID, the request returns a Bad Request (400) response code. + // Note: this uses the FixedIP struct, but only the IPAddress field can be used. + FixedIPs []FixedIP `json:"fixed_ips,omitempty"` +} + +// ToAttachInterfacesCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToAttachInterfacesCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "interfaceAttachment") +} + +// Create requests the creation of a new interface attachment on the server. +func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToAttachInterfacesCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createInterfaceURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete makes a request against the nova API to detach a single interface from the server. +// It needs server and port IDs to make a such request. +func Delete(client *gophercloud.ServiceClient, serverID, portID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteInterfaceURL(client, serverID, portID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go new file mode 100644 index 000000000000..7d15e1ecb4bc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/results.go @@ -0,0 +1,80 @@ +package attachinterfaces + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type attachInterfaceResult struct { + gophercloud.Result +} + +// Extract interprets any attachInterfaceResult as an Interface, if possible. +func (r attachInterfaceResult) Extract() (*Interface, error) { + var s struct { + Interface *Interface `json:"interfaceAttachment"` + } + err := r.ExtractInto(&s) + return s.Interface, err +} + +// GetResult is the response from a Get operation. Call its Extract +// method to interpret it as an Interface. +type GetResult struct { + attachInterfaceResult +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as an Interface. +type CreateResult struct { + attachInterfaceResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// FixedIP represents a Fixed IP Address. +// This struct is also used when creating an attachment, +// but it is not possible to specify a SubnetID. +type FixedIP struct { + SubnetID string `json:"subnet_id,omitempty"` + IPAddress string `json:"ip_address"` +} + +// Interface represents a network interface on a server. +type Interface struct { + PortState string `json:"port_state"` + FixedIPs []FixedIP `json:"fixed_ips"` + PortID string `json:"port_id"` + NetID string `json:"net_id"` + MACAddr string `json:"mac_addr"` +} + +// InterfacePage abstracts the raw results of making a List() request against +// the API. 
+// +// As OpenStack extensions may freely alter the response bodies of structures +// returned to the client, you may only safely access the data provided through +// the ExtractInterfaces call. +type InterfacePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if an InterfacePage contains no interfaces. +func (r InterfacePage) IsEmpty() (bool, error) { + interfaces, err := ExtractInterfaces(r) + return len(interfaces) == 0, err +} + +// ExtractInterfaces interprets the results of a single page from a List() call, +// producing a slice of Interface structs. +func ExtractInterfaces(r pagination.Page) ([]Interface, error) { + var s struct { + Interfaces []Interface `json:"interfaceAttachments"` + } + err := (r.(InterfacePage)).ExtractInto(&s) + return s.Interfaces, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/doc.go new file mode 100644 index 000000000000..cfc07ad5580f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/doc.go @@ -0,0 +1,2 @@ +// attachinterfaces unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/fixtures.go new file mode 100644 index 000000000000..e701a93fc60e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/fixtures.go @@ -0,0 +1,162 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListInterfacesExpected represents an expected repsonse from a ListInterfaces request. +var ListInterfacesExpected = []attachinterfaces.Interface{ + { + PortState: "ACTIVE", + FixedIPs: []attachinterfaces.FixedIP{ + { + SubnetID: "d7906db4-a566-4546-b1f4-5c7fa70f0bf3", + IPAddress: "10.0.0.7", + }, + { + SubnetID: "45906d64-a548-4276-h1f8-kcffa80fjbnl", + IPAddress: "10.0.0.8", + }, + }, + PortID: "0dde1598-b374-474e-986f-5b8dd1df1d4e", + NetID: "8a5fe506-7e9f-4091-899b-96336909d93c", + MACAddr: "fa:16:3e:38:2d:80", + }, +} + +// GetInterfaceExpected represents an expected repsonse from a GetInterface request. +var GetInterfaceExpected = attachinterfaces.Interface{ + PortState: "ACTIVE", + FixedIPs: []attachinterfaces.FixedIP{ + { + SubnetID: "d7906db4-a566-4546-b1f4-5c7fa70f0bf3", + IPAddress: "10.0.0.7", + }, + { + SubnetID: "45906d64-a548-4276-h1f8-kcffa80fjbnl", + IPAddress: "10.0.0.8", + }, + }, + PortID: "0dde1598-b374-474e-986f-5b8dd1df1d4e", + NetID: "8a5fe506-7e9f-4091-899b-96336909d93c", + MACAddr: "fa:16:3e:38:2d:80", +} + +// CreateInterfacesExpected represents an expected repsonse from a CreateInterface request. 
+var CreateInterfacesExpected = attachinterfaces.Interface{ + PortState: "ACTIVE", + FixedIPs: []attachinterfaces.FixedIP{ + { + SubnetID: "d7906db4-a566-4546-b1f4-5c7fa70f0bf3", + IPAddress: "10.0.0.7", + }, + }, + PortID: "0dde1598-b374-474e-986f-5b8dd1df1d4e", + NetID: "8a5fe506-7e9f-4091-899b-96336909d93c", + MACAddr: "fa:16:3e:38:2d:80", +} + +// HandleInterfaceListSuccessfully sets up the test server to respond to a ListInterfaces request. +func HandleInterfaceListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/b07e7a3b-d951-4efc-a4f9-ac9f001afb7f/os-interface", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "interfaceAttachments": [ + { + "port_state":"ACTIVE", + "fixed_ips": [ + { + "subnet_id": "d7906db4-a566-4546-b1f4-5c7fa70f0bf3", + "ip_address": "10.0.0.7" + }, + { + "subnet_id": "45906d64-a548-4276-h1f8-kcffa80fjbnl", + "ip_address": "10.0.0.8" + } + ], + "port_id": "0dde1598-b374-474e-986f-5b8dd1df1d4e", + "net_id": "8a5fe506-7e9f-4091-899b-96336909d93c", + "mac_addr": "fa:16:3e:38:2d:80" + } + ] + }`) + }) +} + +// HandleInterfaceGetSuccessfully sets up the test server to respond to a GetInterface request. +func HandleInterfaceGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/b07e7a3b-d951-4efc-a4f9-ac9f001afb7f/os-interface/0dde1598-b374-474e-986f-5b8dd1df1d4e", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "interfaceAttachment": + { + "port_state":"ACTIVE", + "fixed_ips": [ + { + "subnet_id": "d7906db4-a566-4546-b1f4-5c7fa70f0bf3", + "ip_address": "10.0.0.7" + }, + { + "subnet_id": "45906d64-a548-4276-h1f8-kcffa80fjbnl", + "ip_address": "10.0.0.8" + } + ], + "port_id": "0dde1598-b374-474e-986f-5b8dd1df1d4e", + "net_id": "8a5fe506-7e9f-4091-899b-96336909d93c", + "mac_addr": "fa:16:3e:38:2d:80" + } + }`) + }) +} + +// HandleInterfaceCreateSuccessfully sets up the test server to respond to a CreateInterface request. +func HandleInterfaceCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/b07e7a3b-d951-4efc-a4f9-ac9f001afb7f/os-interface", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "interfaceAttachment": { + "net_id": "8a5fe506-7e9f-4091-899b-96336909d93c" + } + }`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "interfaceAttachment": + { + "port_state":"ACTIVE", + "fixed_ips": [ + { + "subnet_id": "d7906db4-a566-4546-b1f4-5c7fa70f0bf3", + "ip_address": "10.0.0.7" + } + ], + "port_id": "0dde1598-b374-474e-986f-5b8dd1df1d4e", + "net_id": "8a5fe506-7e9f-4091-899b-96336909d93c", + "mac_addr": "fa:16:3e:38:2d:80" + } + }`) + }) +} + +// HandleInterfaceDeleteSuccessfully sets up the test server to respond to a DeleteInterface request. 
+func HandleInterfaceDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/b07e7a3b-d951-4efc-a4f9-ac9f001afb7f/os-interface/0dde1598-b374-474e-986f-5b8dd1df1d4e", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/requests_test.go new file mode 100644 index 000000000000..1ffbd61e755b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/testing/requests_test.go @@ -0,0 +1,89 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleInterfaceListSuccessfully(t) + + expected := ListInterfacesExpected + pages := 0 + err := attachinterfaces.List(client.ServiceClient(), "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := attachinterfaces.ExtractInterfaces(page) + th.AssertNoErr(t, err) + + if len(actual) != 1 { + t.Fatalf("Expected 1 interface, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestListInterfacesAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleInterfaceListSuccessfully(t) + + allPages, err := attachinterfaces.List(client.ServiceClient(), "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f").AllPages() + th.AssertNoErr(t, err) + _, err = attachinterfaces.ExtractInterfaces(allPages) + th.AssertNoErr(t, err) +} + +func TestGetInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleInterfaceGetSuccessfully(t) + + expected := GetInterfaceExpected + + serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f" + interfaceID := "0dde1598-b374-474e-986f-5b8dd1df1d4e" + + actual, err := attachinterfaces.Get(client.ServiceClient(), serverID, interfaceID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, actual) +} + +func TestCreateInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleInterfaceCreateSuccessfully(t) + + expected := CreateInterfacesExpected + + serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f" + networkID := "8a5fe506-7e9f-4091-899b-96336909d93c" + + actual, err := attachinterfaces.Create(client.ServiceClient(), serverID, attachinterfaces.CreateOpts{ + NetworkID: networkID, + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, actual) +} + +func TestDeleteInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleInterfaceDeleteSuccessfully(t) + + serverID := "b07e7a3b-d951-4efc-a4f9-ac9f001afb7f" + portID := "0dde1598-b374-474e-986f-5b8dd1df1d4e" + + err := attachinterfaces.Delete(client.ServiceClient(), serverID, portID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go new file mode 100644 index 000000000000..50292e8b5a55 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/attachinterfaces/urls.go @@ -0,0 +1,18 @@ +package attachinterfaces + +import "github.com/gophercloud/gophercloud" + +func listInterfaceURL(client *gophercloud.ServiceClient, serverID string) string { + return client.ServiceURL("servers", serverID, "os-interface") +} + +func getInterfaceURL(client *gophercloud.ServiceClient, serverID, portID string) string { + return client.ServiceURL("servers", serverID, "os-interface", portID) +} + +func createInterfaceURL(client *gophercloud.ServiceClient, serverID string) string { + return client.ServiceURL("servers", serverID, "os-interface") +} +func deleteInterfaceURL(client *gophercloud.ServiceClient, serverID, portID string) string { + return client.ServiceURL("servers", serverID, "os-interface", portID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go new file mode 100644 index 000000000000..29b554d2137c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/doc.go @@ -0,0 +1,61 @@ +/* +Package availabilityzones provides the ability to get lists and detailed +availability zone information and to extend a server result with +availability zone information. + +Example of Extend server result with Availability Zone Information: + + type ServerWithAZ struct { + servers.Server + availabilityzones.ServerAvailabilityZoneExt + } + + var allServers []ServerWithAZ + + allPages, err := servers.List(client, nil).AllPages() + if err != nil { + panic("Unable to retrieve servers: %s", err) + } + + err = servers.ExtractServersInto(allPages, &allServers) + if err != nil { + panic("Unable to extract servers: %s", err) + } + + for _, server := range allServers { + fmt.Println(server.AvailabilityZone) + } + +Example of Get Availability Zone Information + + allPages, err := availabilityzones.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages) + if err != nil { + panic(err) + } + + for _, zoneInfo := range availabilityZoneInfo { + fmt.Printf("%+v\n", zoneInfo) + } + +Example of Get Detailed Availability Zone Information + + allPages, err := availabilityzones.ListDetail(computeClient).AllPages() + if err != nil { + panic(err) + } + + availabilityZoneInfo, err := availabilityzones.ExtractAvailabilityZones(allPages) + if err != nil { + panic(err) + } + + for _, zoneInfo := range availabilityZoneInfo { + fmt.Printf("%+v\n", zoneInfo) + } +*/ +package availabilityzones diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/requests.go new file mode 100644 index 000000000000..f9a2e86e03d4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/requests.go @@ -0,0 +1,20 @@ +package availabilityzones + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will return the existing availability zones. 
+func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return AvailabilityZonePage{pagination.SinglePageBase(r)} + }) +} + +// ListDetail will return the existing availability zones with detailed information. +func ListDetail(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listDetailURL(client), func(r pagination.PageResult) pagination.Page { + return AvailabilityZonePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go new file mode 100644 index 000000000000..d48a0ea85836 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/results.go @@ -0,0 +1,76 @@ +package availabilityzones + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ServerAvailabilityZoneExt is an extension to the base Server object. +type ServerAvailabilityZoneExt struct { + // AvailabilityZone is the availabilty zone the server is in. + AvailabilityZone string `json:"OS-EXT-AZ:availability_zone"` +} + +// ServiceState represents the state of a service in an AvailabilityZone. +type ServiceState struct { + Active bool `json:"active"` + Available bool `json:"available"` + UpdatedAt time.Time `json:"-"` +} + +// UnmarshalJSON to override default +func (r *ServiceState) UnmarshalJSON(b []byte) error { + type tmp ServiceState + var s struct { + tmp + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ServiceState(s.tmp) + + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +// Services is a map of services contained in an AvailabilityZone. +type Services map[string]ServiceState + +// Hosts is map of hosts/nodes contained in an AvailabilityZone. +// Each host can have multiple services. +type Hosts map[string]Services + +// ZoneState represents the current state of the availability zone. +type ZoneState struct { + // Returns true if the availability zone is available + Available bool `json:"available"` +} + +// AvailabilityZone contains all the information associated with an OpenStack +// AvailabilityZone. +type AvailabilityZone struct { + Hosts Hosts `json:"hosts"` + // The availability zone name + ZoneName string `json:"zoneName"` + ZoneState ZoneState `json:"zoneState"` +} + +type AvailabilityZonePage struct { + pagination.SinglePageBase +} + +// ExtractAvailabilityZones returns a slice of AvailabilityZones contained in a +// single page of results. 
+func ExtractAvailabilityZones(r pagination.Page) ([]AvailabilityZone, error) { + var s struct { + AvailabilityZoneInfo []AvailabilityZone `json:"availabilityZoneInfo"` + } + err := (r.(AvailabilityZonePage)).ExtractInto(&s) + return s.AvailabilityZoneInfo, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/doc.go new file mode 100644 index 000000000000..a4408d7a0d3f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/doc.go @@ -0,0 +1,2 @@ +// availabilityzones unittests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/fixtures.go new file mode 100644 index 000000000000..9cc6d46379ee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/fixtures.go @@ -0,0 +1,197 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + az "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const GetOutput = ` +{ + "availabilityZoneInfo": [ + { + "hosts": null, + "zoneName": "nova", + "zoneState": { + "available": true + } + } + ] +} +` + +const GetDetailOutput = ` +{ + "availabilityZoneInfo": [ + { + "hosts": { + "localhost": { + "nova-cert": { + "active": true, + "available": false, + "updated_at": "2017-10-14T17:03:39.000000" + }, + "nova-conductor": { + "active": true, + "available": false, + "updated_at": "2017-10-14T17:04:09.000000" + }, + "nova-consoleauth": { + "active": true, + "available": false, + "updated_at": "2017-10-14T17:04:18.000000" + }, + "nova-scheduler": { + "active": true, + "available": false, + "updated_at": "2017-10-14T17:04:30.000000" + } + }, + "openstack-acc-tests.novalocal": { + "nova-cert": { + "active": true, + "available": true, + "updated_at": "2018-01-04T04:11:19.000000" + }, + "nova-conductor": { + "active": true, + "available": true, + "updated_at": "2018-01-04T04:11:22.000000" + }, + "nova-consoleauth": { + "active": true, + "available": true, + "updated_at": "2018-01-04T04:11:20.000000" + }, + "nova-scheduler": { + "active": true, + "available": true, + "updated_at": "2018-01-04T04:11:23.000000" + } + } + }, + "zoneName": "internal", + "zoneState": { + "available": true + } + }, + { + "hosts": { + "openstack-acc-tests.novalocal": { + "nova-compute": { + "active": true, + "available": true, + "updated_at": "2018-01-04T04:11:23.000000" + } + } + }, + "zoneName": "nova", + "zoneState": { + "available": true + } + } + ] +}` + +var AZResult = []az.AvailabilityZone{ + { + Hosts: nil, + ZoneName: "nova", + ZoneState: az.ZoneState{Available: true}, + }, +} + +var AZDetailResult = []az.AvailabilityZone{ + { + Hosts: az.Hosts{ + "localhost": az.Services{ + "nova-cert": az.ServiceState{ + Active: true, + Available: false, + UpdatedAt: time.Date(2017, 10, 14, 17, 3, 39, 0, time.UTC), + }, + "nova-conductor": az.ServiceState{ + Active: true, + Available: false, + UpdatedAt: time.Date(2017, 10, 14, 17, 4, 9, 0, time.UTC), + }, + "nova-consoleauth": az.ServiceState{ + Active: true, + Available: false, + UpdatedAt: 
time.Date(2017, 10, 14, 17, 4, 18, 0, time.UTC), + }, + "nova-scheduler": az.ServiceState{ + Active: true, + Available: false, + UpdatedAt: time.Date(2017, 10, 14, 17, 4, 30, 0, time.UTC), + }, + }, + "openstack-acc-tests.novalocal": az.Services{ + "nova-cert": az.ServiceState{ + Active: true, + Available: true, + UpdatedAt: time.Date(2018, 1, 4, 4, 11, 19, 0, time.UTC), + }, + "nova-conductor": az.ServiceState{ + Active: true, + Available: true, + UpdatedAt: time.Date(2018, 1, 4, 4, 11, 22, 0, time.UTC), + }, + "nova-consoleauth": az.ServiceState{ + Active: true, + Available: true, + UpdatedAt: time.Date(2018, 1, 4, 4, 11, 20, 0, time.UTC), + }, + "nova-scheduler": az.ServiceState{ + Active: true, + Available: true, + UpdatedAt: time.Date(2018, 1, 4, 4, 11, 23, 0, time.UTC), + }, + }, + }, + ZoneName: "internal", + ZoneState: az.ZoneState{Available: true}, + }, + { + Hosts: az.Hosts{ + "openstack-acc-tests.novalocal": az.Services{ + "nova-compute": az.ServiceState{ + Active: true, + Available: true, + UpdatedAt: time.Date(2018, 1, 4, 4, 11, 23, 0, time.UTC), + }, + }, + }, + ZoneName: "nova", + ZoneState: az.ZoneState{Available: true}, + }, +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for availability zone information. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-availability-zone", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleGetDetailSuccessfully configures the test server to respond to a Get request +// for detailed availability zone information. +func HandleGetDetailSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-availability-zone/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetDetailOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/requests_test.go new file mode 100644 index 000000000000..8996d366d04b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/testing/requests_test.go @@ -0,0 +1,41 @@ +package testing + +import ( + "testing" + + az "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// Verifies that availability zones can be listed correctly +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetSuccessfully(t) + + allPages, err := az.List(client.ServiceClient()).AllPages() + th.AssertNoErr(t, err) + + actual, err := az.ExtractAvailabilityZones(allPages) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, AZResult, actual) +} + +// Verifies that detailed availability zones can be listed correctly +func TestListDetail(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetDetailSuccessfully(t) + + allPages, err := az.ListDetail(client.ServiceClient()).AllPages() + th.AssertNoErr(t, err) + + actual, err := az.ExtractAvailabilityZones(allPages) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, 
AZDetailResult, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/urls.go new file mode 100644 index 000000000000..9d99ec74b76d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones/urls.go @@ -0,0 +1,11 @@ +package availabilityzones + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-availability-zone") +} + +func listDetailURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-availability-zone", "detail") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go new file mode 100644 index 000000000000..d291325e0a1d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/doc.go @@ -0,0 +1,152 @@ +/* +Package bootfromvolume extends a server create request with the ability to +specify block device options. This can be used to boot a server from a block +storage volume as well as specify multiple ephemeral disks upon creation. + +It is recommended to refer to the Block Device Mapping documentation to see +all possible ways to configure a server's block devices at creation time: + +https://docs.openstack.org/nova/latest/user/block-device-mapping.html + +Note that this package implements `block_device_mapping_v2`. + +Example of Creating a Server From an Image + +This example will boot a server from an image and use a standard ephemeral +disk as the server's root disk. This is virtually no different than creating +a server without using block device mappings. + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + ImageRef: "image-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Creating a Server From a New Volume + +This example will create a block storage volume based on the given Image. The +server will use this volume as its root disk. + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + VolumeSize: 2, + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Creating a Server From an Existing Volume + +This example will create a server with an existing volume as its root disk. 
+ + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceVolume, + UUID: "volume-uuid", + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Creating a Server with Multiple Ephemeral Disks + +This example will create a server with multiple ephemeral disks. The first +block device will be based off of an existing Image. Each additional +ephemeral disks must have an index of -1. + + blockDevices := []bootfromvolume.BlockDevice{ + bootfromvolume.BlockDevice{ + BootIndex: 0, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + SourceType: bootfromvolume.SourceImage, + UUID: "image-uuid", + VolumeSize: 5, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + bootfromvolume.BlockDevice{ + BootIndex: -1, + DestinationType: bootfromvolume.DestinationLocal, + DeleteOnTermination: true, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + FlavorRef: "flavor-uuid", + ImageRef: "image-uuid", + } + + createOpts := bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + BlockDevice: blockDevices, + } + + server, err := bootfromvolume.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package bootfromvolume diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go new file mode 100644 index 000000000000..30c6170117d8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/requests.go @@ -0,0 +1,128 @@ +package bootfromvolume + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +type ( + // DestinationType represents the type of medium being used as the + // destination of the bootable device. + DestinationType string + + // SourceType represents the type of medium being used as the source of the + // bootable device. + SourceType string +) + +const ( + // DestinationLocal DestinationType is for using an ephemeral disk as the + // destination. + DestinationLocal DestinationType = "local" + + // DestinationVolume DestinationType is for using a volume as the destination. + DestinationVolume DestinationType = "volume" + + // SourceBlank SourceType is for a "blank" or empty source. + SourceBlank SourceType = "blank" + + // SourceImage SourceType is for using images as the source of a block device. + SourceImage SourceType = "image" + + // SourceSnapshot SourceType is for using a volume snapshot as the source of + // a block device. + SourceSnapshot SourceType = "snapshot" + + // SourceVolume SourceType is for using a volume as the source of block + // device. 
+ SourceVolume SourceType = "volume" +) + +// BlockDevice is a structure with options for creating block devices in a +// server. The block device may be created from an image, snapshot, new volume, +// or existing volume. The destination may be a new volume, existing volume +// which will be attached to the instance, ephemeral disk, or boot device. +type BlockDevice struct { + // SourceType must be one of: "volume", "snapshot", "image", or "blank". + SourceType SourceType `json:"source_type" required:"true"` + + // UUID is the unique identifier for the existing volume, snapshot, or + // image (see above). + UUID string `json:"uuid,omitempty"` + + // BootIndex is the boot index. It defaults to 0. + BootIndex int `json:"boot_index"` + + // DeleteOnTermination specifies whether or not to delete the attached volume + // when the server is deleted. Defaults to `false`. + DeleteOnTermination bool `json:"delete_on_termination"` + + // DestinationType is the type that gets created. Possible values are "volume" + // and "local". + DestinationType DestinationType `json:"destination_type,omitempty"` + + // GuestFormat specifies the format of the block device. + GuestFormat string `json:"guest_format,omitempty"` + + // VolumeSize is the size of the volume to create (in gigabytes). This can be + // omitted for existing volumes. + VolumeSize int `json:"volume_size,omitempty"` + + // DeviceType specifies the device type of the block devices. + // Examples of this are disk, cdrom, floppy, lun, etc. + DeviceType string `json:"device_type,omitempty"` + + // DiskBus is the bus type of the block devices. + // Examples of this are ide, usb, virtio, scsi, etc. + DiskBus string `json:"disk_bus,omitempty"` +} + +// CreateOptsExt is a structure that extends the server `CreateOpts` structure +// by allowing for a block device mapping. +type CreateOptsExt struct { + servers.CreateOptsBuilder + BlockDevice []BlockDevice `json:"block_device_mapping_v2,omitempty"` +} + +// ToServerCreateMap adds the block device mapping option to the base server +// creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if len(opts.BlockDevice) == 0 { + err := gophercloud.ErrMissingInput{} + err.Argument = "bootfromvolume.CreateOptsExt.BlockDevice" + return nil, err + } + + serverMap := base["server"].(map[string]interface{}) + + blockDevice := make([]map[string]interface{}, len(opts.BlockDevice)) + + for i, bd := range opts.BlockDevice { + b, err := gophercloud.BuildRequestBody(bd, "") + if err != nil { + return nil, err + } + blockDevice[i] = b + } + serverMap["block_device_mapping_v2"] = blockDevice + + return base, nil +} + +// Create requests the creation of a server from the given block device mapping. 
+func Create(client *gophercloud.ServiceClient, opts servers.CreateOptsBuilder) (r servers.CreateResult) { + b, err := opts.ToServerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go new file mode 100644 index 000000000000..ba1eafabcd09 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/results.go @@ -0,0 +1,12 @@ +package bootfromvolume + +import ( + os "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +// CreateResult temporarily contains the response from a Create call. +// It embeds the standard servers.CreateResults type and so can be used the +// same way as a standard server request result. +type CreateResult struct { + os.CreateResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/doc.go new file mode 100644 index 000000000000..cf5048acbaca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/doc.go @@ -0,0 +1,2 @@ +// bootfromvolume unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/fixtures.go new file mode 100644 index 000000000000..484e40a09a24 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/fixtures.go @@ -0,0 +1,275 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +var BaseCreateOpts = servers.CreateOpts{ + Name: "createdserver", + FlavorRef: "performance1-1", +} + +var BaseCreateOptsWithImageRef = servers.CreateOpts{ + Name: "createdserver", + FlavorRef: "performance1-1", + ImageRef: "asdfasdfasdf", +} + +const ExpectedNewVolumeRequest = ` +{ + "server": { + "name":"createdserver", + "flavorRef":"performance1-1", + "imageRef":"", + "block_device_mapping_v2":[ + { + "uuid":"123456", + "source_type":"image", + "destination_type":"volume", + "boot_index": 0, + "delete_on_termination": true, + "volume_size": 10 + } + ] + } +} +` + +var NewVolumeRequest = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: BaseCreateOpts, + BlockDevice: []bootfromvolume.BlockDevice{ + { + UUID: "123456", + SourceType: bootfromvolume.SourceImage, + DestinationType: bootfromvolume.DestinationVolume, + VolumeSize: 10, + DeleteOnTermination: true, + }, + }, +} + +const ExpectedExistingVolumeRequest = ` +{ + "server": { + "name":"createdserver", + "flavorRef":"performance1-1", + "imageRef":"", + "block_device_mapping_v2":[ + { + "uuid":"123456", + "source_type":"volume", + "destination_type":"volume", + "boot_index": 0, + "delete_on_termination": true + } + ] + } +} +` + +var ExistingVolumeRequest = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: BaseCreateOpts, + BlockDevice: []bootfromvolume.BlockDevice{ + { + UUID: "123456", + SourceType: 
bootfromvolume.SourceVolume, + DestinationType: bootfromvolume.DestinationVolume, + DeleteOnTermination: true, + }, + }, +} + +const ExpectedImageRequest = ` +{ + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "block_device_mapping_v2":[ + { + "boot_index": 0, + "delete_on_termination": true, + "destination_type":"local", + "source_type":"image", + "uuid":"asdfasdfasdf" + } + ] + } +} +` + +var ImageRequest = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: BaseCreateOptsWithImageRef, + BlockDevice: []bootfromvolume.BlockDevice{ + { + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: "asdfasdfasdf", + }, + }, +} + +const ExpectedMultiEphemeralRequest = ` +{ + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "block_device_mapping_v2":[ + { + "boot_index": 0, + "delete_on_termination": true, + "destination_type":"local", + "source_type":"image", + "uuid":"asdfasdfasdf" + }, + { + "boot_index": -1, + "delete_on_termination": true, + "destination_type":"local", + "guest_format":"ext4", + "source_type":"blank", + "volume_size": 1 + }, + { + "boot_index": -1, + "delete_on_termination": true, + "destination_type":"local", + "guest_format":"ext4", + "source_type":"blank", + "volume_size": 1 + } + ] + } +} +` + +var MultiEphemeralRequest = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: BaseCreateOptsWithImageRef, + BlockDevice: []bootfromvolume.BlockDevice{ + { + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: "asdfasdfasdf", + }, + { + BootIndex: -1, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + { + BootIndex: -1, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + GuestFormat: "ext4", + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + }, + }, +} + +const ExpectedImageAndNewVolumeRequest = ` +{ + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "block_device_mapping_v2":[ + { + "boot_index": 0, + "delete_on_termination": true, + "destination_type":"local", + "source_type":"image", + "uuid":"asdfasdfasdf" + }, + { + "boot_index": 1, + "delete_on_termination": true, + "destination_type":"volume", + "source_type":"blank", + "volume_size": 1, + "device_type": "disk", + "disk_bus": "scsi" + } + ] + } +} +` + +var ImageAndNewVolumeRequest = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: BaseCreateOptsWithImageRef, + BlockDevice: []bootfromvolume.BlockDevice{ + { + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: "asdfasdfasdf", + }, + { + BootIndex: 1, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceBlank, + VolumeSize: 1, + DeviceType: "disk", + DiskBus: "scsi", + }, + }, +} + +const ExpectedImageAndExistingVolumeRequest = ` +{ + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "block_device_mapping_v2":[ + { + "boot_index": 0, + "delete_on_termination": true, + "destination_type":"local", + "source_type":"image", + "uuid":"asdfasdfasdf" + }, + { + "boot_index": 
1, + "delete_on_termination": true, + "destination_type":"volume", + "source_type":"volume", + "uuid":"123456", + "volume_size": 1 + } + ] + } +} +` + +var ImageAndExistingVolumeRequest = bootfromvolume.CreateOptsExt{ + CreateOptsBuilder: BaseCreateOptsWithImageRef, + BlockDevice: []bootfromvolume.BlockDevice{ + { + BootIndex: 0, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationLocal, + SourceType: bootfromvolume.SourceImage, + UUID: "asdfasdfasdf", + }, + { + BootIndex: 1, + DeleteOnTermination: true, + DestinationType: bootfromvolume.DestinationVolume, + SourceType: bootfromvolume.SourceVolume, + UUID: "123456", + VolumeSize: 1, + }, + }, +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/requests_test.go new file mode 100644 index 000000000000..6b59ff722e23 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/testing/requests_test.go @@ -0,0 +1,44 @@ +package testing + +import ( + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestBootFromNewVolume(t *testing.T) { + + actual, err := NewVolumeRequest.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, ExpectedNewVolumeRequest, actual) +} + +func TestBootFromExistingVolume(t *testing.T) { + actual, err := ExistingVolumeRequest.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, ExpectedExistingVolumeRequest, actual) +} + +func TestBootFromImage(t *testing.T) { + actual, err := ImageRequest.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, ExpectedImageRequest, actual) +} + +func TestCreateMultiEphemeralOpts(t *testing.T) { + actual, err := MultiEphemeralRequest.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, ExpectedMultiEphemeralRequest, actual) +} + +func TestAttachNewVolume(t *testing.T) { + actual, err := ImageAndNewVolumeRequest.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, ExpectedImageAndNewVolumeRequest, actual) +} + +func TestAttachExistingVolume(t *testing.T) { + actual, err := ImageAndExistingVolumeRequest.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, ExpectedImageAndExistingVolumeRequest, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go new file mode 100644 index 000000000000..dc007eadf867 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/bootfromvolume/urls.go @@ -0,0 +1,7 @@ +package bootfromvolume + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-volumes_boot") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go new file mode 100644 index 000000000000..255213555d1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/doc.go @@ -0,0 +1,55 @@ +/* +Package defsecrules enables management of default security group rules. + +Default security group rules are rules that are managed in the "default" +security group. 
+ +This is only applicable in environments running nova-network. This package will +not work if the OpenStack environment is running the OpenStack Networking +(Neutron) service. + +Example of Listing Default Security Group Rules + + allPages, err := defsecrules.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allDefaultRules, err := defsecrules.ExtractDefaultRules(allPages) + if err != nil { + panic(err) + } + + for _, df := range allDefaultRules { + fmt.Printf("%+v\n", df) + } + +Example of Retrieving a Default Security Group Rule + + rule, err := defsecrules.Get(computeClient, "rule-id").Extract() + if err != nil { + panic(err) + } + +Example of Creating a Default Security Group Rule + + createOpts := defsecrules.CreateOpts{ + IPProtocol: "TCP", + FromPort: 80, + ToPort: 80, + CIDR: "10.10.12.0/24", + } + + rule, err := defsecrules.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example of Deleting a Default Security Group Rule + + err := defsecrules.Delete(computeClient, "rule-id").ExtractErr() + if err != nil { + panic(err) + } +*/ +package defsecrules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go new file mode 100644 index 000000000000..b63a5d5d63d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/requests.go @@ -0,0 +1,75 @@ +package defsecrules + +import ( + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will return a collection of default rules. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, rootURL(client), func(r pagination.PageResult) pagination.Page { + return DefaultRulePage{pagination.SinglePageBase(r)} + }) +} + +// CreateOpts represents the configuration for adding a new default rule. +type CreateOpts struct { + // The lower bound of the port range that will be opened. + FromPort int `json:"from_port"` + + // The upper bound of the port range that will be opened. + ToPort int `json:"to_port"` + + // The protocol type that will be allowed, e.g. TCP. + IPProtocol string `json:"ip_protocol" required:"true"` + + // ONLY required if FromGroupID is blank. This represents the IP range that + // will be the source of network traffic to your security group. + // + // Use 0.0.0.0/0 to allow all IPv4 addresses. + // Use ::/0 to allow all IPv6 addresses. + CIDR string `json:"cidr,omitempty"` +} + +// CreateOptsBuilder builds the create rule options into a serializable format. +type CreateOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// ToRuleCreateMap builds the create rule options into a serializable format. +func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { + if opts.FromPort == 0 && strings.ToUpper(opts.IPProtocol) != "ICMP" { + return nil, gophercloud.ErrMissingInput{Argument: "FromPort"} + } + if opts.ToPort == 0 && strings.ToUpper(opts.IPProtocol) != "ICMP" { + return nil, gophercloud.ErrMissingInput{Argument: "ToPort"} + } + return gophercloud.BuildRequestBody(opts, "security_group_default_rule") +} + +// Create is the operation responsible for creating a new default rule. 
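+//
+// A short sketch of an ICMP rule, the one case where FromPort and ToPort may
+// both be zero; "computeClient" and the CIDR below are placeholder values:
+//
+//	opts := defsecrules.CreateOpts{
+//		IPProtocol: "ICMP",
+//		FromPort:   0,
+//		ToPort:     0,
+//		CIDR:       "10.10.12.0/24",
+//	}
+//
+//	rule, err := defsecrules.Create(computeClient, opts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}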
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get will return details for a particular default rule. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, id), &r.Body, nil) + return +} + +// Delete will permanently delete a rule the project's default security group. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go new file mode 100644 index 000000000000..98c18fe560e5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/results.go @@ -0,0 +1,73 @@ +package defsecrules + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/pagination" +) + +// DefaultRule represents a rule belonging to the "default" security group. +// It is identical to an openstack/compute/v2/extensions/secgroups.Rule. +type DefaultRule secgroups.Rule + +func (r *DefaultRule) UnmarshalJSON(b []byte) error { + var s secgroups.Rule + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = DefaultRule(s) + return nil +} + +// DefaultRulePage is a single page of a DefaultRule collection. +type DefaultRulePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of default rules contains any results. +func (page DefaultRulePage) IsEmpty() (bool, error) { + users, err := ExtractDefaultRules(page) + return len(users) == 0, err +} + +// ExtractDefaultRules returns a slice of DefaultRules contained in a single +// page of results. +func ExtractDefaultRules(r pagination.Page) ([]DefaultRule, error) { + var s struct { + DefaultRules []DefaultRule `json:"security_group_default_rules"` + } + err := (r.(DefaultRulePage)).ExtractInto(&s) + return s.DefaultRules, err +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// Extract will extract a DefaultRule struct from a Create or Get response. +func (r commonResult) Extract() (*DefaultRule, error) { + var s struct { + DefaultRule DefaultRule `json:"security_group_default_rule"` + } + err := r.ExtractInto(&s) + return &s.DefaultRule, err +} + +// DeleteResult is the response from a delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/doc.go new file mode 100644 index 000000000000..6eeb60f057f5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/doc.go @@ -0,0 +1,2 @@ +// defsecrules unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/fixtures.go new file mode 100644 index 000000000000..e4a62d4ecc98 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/fixtures.go @@ -0,0 +1,143 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const rootPath = "/os-security-group-default-rules" + +func mockListRulesResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_default_rules": [ + { + "from_port": 80, + "id": "{ruleID}", + "ip_protocol": "TCP", + "ip_range": { + "cidr": "10.10.10.0/24" + }, + "to_port": 80 + } + ] +} + `) + }) +} + +func mockCreateRuleResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group_default_rule": { + "ip_protocol": "TCP", + "from_port": 80, + "to_port": 80, + "cidr": "10.10.12.0/24" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_default_rule": { + "from_port": 80, + "id": "{ruleID}", + "ip_protocol": "TCP", + "ip_range": { + "cidr": "10.10.12.0/24" + }, + "to_port": 80 + } +} +`) + }) +} + +func mockCreateRuleResponseICMPZero(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group_default_rule": { + "ip_protocol": "ICMP", + "from_port": 0, + "to_port": 0, + "cidr": "10.10.12.0/24" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_default_rule": { + "from_port": 0, + "id": "{ruleID}", + "ip_protocol": "ICMP", + "ip_range": { + "cidr": "10.10.12.0/24" + }, + "to_port": 0 + } +} +`) + }) +} + +func mockGetRuleResponse(t *testing.T, ruleID string) { + url := rootPath + "/" + ruleID + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_default_rule": { + "id": "{ruleID}", + "from_port": 80, + "to_port": 80, + "ip_protocol": "TCP", + "ip_range": { + "cidr": "10.10.12.0/24" + } + } +} + `) + }) +} + +func 
mockDeleteRuleResponse(t *testing.T, ruleID string) { + url := rootPath + "/" + ruleID + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/requests_test.go new file mode 100644 index 000000000000..1f2fb8686ae9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/testing/requests_test.go @@ -0,0 +1,127 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ruleID = "{ruleID}" + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListRulesResponse(t) + + count := 0 + + err := defsecrules.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := defsecrules.ExtractDefaultRules(page) + th.AssertNoErr(t, err) + + expected := []defsecrules.DefaultRule{ + { + FromPort: 80, + ID: ruleID, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "10.10.10.0/24"}, + ToPort: 80, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateRuleResponse(t) + + opts := defsecrules.CreateOpts{ + IPProtocol: "TCP", + FromPort: 80, + ToPort: 80, + CIDR: "10.10.12.0/24", + } + + group, err := defsecrules.Create(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &defsecrules.DefaultRule{ + ID: ruleID, + FromPort: 80, + ToPort: 80, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "10.10.12.0/24"}, + } + th.AssertDeepEquals(t, expected, group) +} + +func TestCreateICMPZero(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateRuleResponseICMPZero(t) + + opts := defsecrules.CreateOpts{ + IPProtocol: "ICMP", + FromPort: 0, + ToPort: 0, + CIDR: "10.10.12.0/24", + } + + group, err := defsecrules.Create(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &defsecrules.DefaultRule{ + ID: ruleID, + FromPort: 0, + ToPort: 0, + IPProtocol: "ICMP", + IPRange: secgroups.IPRange{CIDR: "10.10.12.0/24"}, + } + th.AssertDeepEquals(t, expected, group) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetRuleResponse(t, ruleID) + + group, err := defsecrules.Get(client.ServiceClient(), ruleID).Extract() + th.AssertNoErr(t, err) + + expected := &defsecrules.DefaultRule{ + ID: ruleID, + FromPort: 80, + ToPort: 80, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "10.10.12.0/24"}, + } + + th.AssertDeepEquals(t, expected, group) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteRuleResponse(t, ruleID) + + err := defsecrules.Delete(client.ServiceClient(), ruleID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go new file mode 100644 index 000000000000..e5fbf8245408 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/defsecrules/urls.go @@ -0,0 +1,13 @@ +package defsecrules + +import "github.com/gophercloud/gophercloud" + +const rulepath = "os-security-group-default-rules" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rulepath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rulepath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go new file mode 100644 index 000000000000..00e7c3becffd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/delegate.go @@ -0,0 +1,23 @@ +package extensions + +import ( + "github.com/gophercloud/gophercloud" + common "github.com/gophercloud/gophercloud/openstack/common/extensions" + "github.com/gophercloud/gophercloud/pagination" +) + +// ExtractExtensions interprets a Page as a slice of Extensions. +func ExtractExtensions(page pagination.Page) ([]common.Extension, error) { + return common.ExtractExtensions(page) +} + +// Get retrieves information for a specific extension using its alias. +func Get(c *gophercloud.ServiceClient, alias string) common.GetResult { + return common.Get(c, alias) +} + +// List returns a Pager which allows you to iterate over the full collection of extensions. +// It does not accept query parameters. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return common.List(c) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go new file mode 100644 index 000000000000..ed9cc6f73357 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/doc.go @@ -0,0 +1,46 @@ +/* +Package diskconfig provides information and interaction with the Disk Config +extension that works with the OpenStack Compute service. 
+ +Example of Obtaining the Disk Config of a Server + + type ServerWithDiskConfig { + servers.Server + diskconfig.ServerDiskConfigExt + } + + var allServers []ServerWithDiskConfig + + allPages, err := servers.List(client, nil).AllPages() + if err != nil { + panic("Unable to retrieve servers: %s", err) + } + + err = servers.ExtractServersInto(allPages, &allServers) + if err != nil { + panic("Unable to extract servers: %s", err) + } + + for _, server := range allServers { + fmt.Println(server.DiskConfig) + } + +Example of Creating a Server with Disk Config + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := diskconfig.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + DiskConfig: diskconfig.Manual, + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic("Unable to create server: %s", err) + } +*/ +package diskconfig diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go new file mode 100644 index 000000000000..cc04aed6f31d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/requests.go @@ -0,0 +1,106 @@ +package diskconfig + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +// DiskConfig represents one of the two possible settings for the DiskConfig +// option when creating, rebuilding, or resizing servers: Auto or Manual. +type DiskConfig string + +const ( + // Auto builds a server with a single partition the size of the target flavor + // disk and automatically adjusts the filesystem to fit the entire partition. + // Auto may only be used with images and servers that use a single EXT3 + // partition. + Auto DiskConfig = "AUTO" + + // Manual builds a server using whatever partition scheme and filesystem are + // present in the source image. If the target flavor disk is larger, the + // remaining space is left unpartitioned. This enables images to have non-EXT3 + // filesystems, multiple partitions, and so on, and enables you to manage the + // disk configuration. It also results in slightly shorter boot times. + Manual DiskConfig = "MANUAL" +) + +// CreateOptsExt adds a DiskConfig option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // DiskConfig [optional] controls how the created server's disk is partitioned. + DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"` +} + +// ToServerCreateMap adds the diskconfig option to the base server creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if string(opts.DiskConfig) == "" { + return base, nil + } + + serverMap := base["server"].(map[string]interface{}) + serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) + + return base, nil +} + +// RebuildOptsExt adds a DiskConfig option to the base RebuildOpts. +type RebuildOptsExt struct { + servers.RebuildOptsBuilder + + // DiskConfig controls how the rebuilt server's disk is partitioned. + DiskConfig DiskConfig `json:"OS-DCF:diskConfig,omitempty"` +} + +// ToServerRebuildMap adds the diskconfig option to the base server rebuild options. 
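+//
+// A minimal sketch of wrapping rebuild options, assuming a base
+// servers.RebuildOpts; the image ID is a placeholder:
+//
+//	base := servers.RebuildOpts{ImageID: "image-uuid"}
+//
+//	ext := diskconfig.RebuildOptsExt{
+//		RebuildOptsBuilder: base,
+//		DiskConfig:         diskconfig.Auto,
+//	}
+//
+//	body, err := ext.ToServerRebuildMap()
+//	if err != nil {
+//		panic(err)
+//	}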
+func (opts RebuildOptsExt) ToServerRebuildMap() (map[string]interface{}, error) { + if opts.DiskConfig != Auto && opts.DiskConfig != Manual { + err := gophercloud.ErrInvalidInput{} + err.Argument = "diskconfig.RebuildOptsExt.DiskConfig" + err.Info = "Must be either diskconfig.Auto or diskconfig.Manual" + return nil, err + } + + base, err := opts.RebuildOptsBuilder.ToServerRebuildMap() + if err != nil { + return nil, err + } + + serverMap := base["rebuild"].(map[string]interface{}) + serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) + + return base, nil +} + +// ResizeOptsExt adds a DiskConfig option to the base server resize options. +type ResizeOptsExt struct { + servers.ResizeOptsBuilder + + // DiskConfig [optional] controls how the resized server's disk is partitioned. + DiskConfig DiskConfig +} + +// ToServerResizeMap adds the diskconfig option to the base server creation options. +func (opts ResizeOptsExt) ToServerResizeMap() (map[string]interface{}, error) { + if opts.DiskConfig != Auto && opts.DiskConfig != Manual { + err := gophercloud.ErrInvalidInput{} + err.Argument = "diskconfig.ResizeOptsExt.DiskConfig" + err.Info = "Must be either diskconfig.Auto or diskconfig.Manual" + return nil, err + } + + base, err := opts.ResizeOptsBuilder.ToServerResizeMap() + if err != nil { + return nil, err + } + + serverMap := base["resize"].(map[string]interface{}) + serverMap["OS-DCF:diskConfig"] = string(opts.DiskConfig) + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go new file mode 100644 index 000000000000..239b2683d7cd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/results.go @@ -0,0 +1,6 @@ +package diskconfig + +type ServerDiskConfigExt struct { + // DiskConfig is the disk configuration of the server. 
+ DiskConfig DiskConfig `json:"OS-DCF:diskConfig"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/testing/doc.go new file mode 100644 index 000000000000..52ab247565da --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/testing/doc.go @@ -0,0 +1,2 @@ +// diskconfig unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/testing/requests_test.go new file mode 100644 index 000000000000..6ce560aa61ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig/testing/requests_test.go @@ -0,0 +1,88 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreateOpts(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + ext := diskconfig.CreateOptsExt{ + CreateOptsBuilder: base, + DiskConfig: diskconfig.Manual, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1", + "OS-DCF:diskConfig": "MANUAL" + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} + +func TestRebuildOpts(t *testing.T) { + base := servers.RebuildOpts{ + Name: "rebuiltserver", + AdminPass: "swordfish", + ImageID: "asdfasdfasdf", + } + + ext := diskconfig.RebuildOptsExt{ + RebuildOptsBuilder: base, + DiskConfig: diskconfig.Auto, + } + + actual, err := ext.ToServerRebuildMap() + th.AssertNoErr(t, err) + + expected := ` + { + "rebuild": { + "name": "rebuiltserver", + "imageRef": "asdfasdfasdf", + "adminPass": "swordfish", + "OS-DCF:diskConfig": "AUTO" + } + } + ` + th.CheckJSONEquals(t, expected, actual) +} + +func TestResizeOpts(t *testing.T) { + base := servers.ResizeOpts{ + FlavorRef: "performance1-8", + } + + ext := diskconfig.ResizeOptsExt{ + ResizeOptsBuilder: base, + DiskConfig: diskconfig.Auto, + } + + actual, err := ext.ToServerResizeMap() + th.AssertNoErr(t, err) + + expected := ` + { + "resize": { + "flavorRef": "performance1-8", + "OS-DCF:diskConfig": "AUTO" + } + } + ` + th.CheckJSONEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go new file mode 100644 index 000000000000..2b447da1d658 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/doc.go @@ -0,0 +1,3 @@ +// Package extensions provides information and interaction with the +// different extensions available for the OpenStack Compute service. 
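+//
+// A minimal usage sketch; "computeClient" is assumed to be an authenticated
+// *gophercloud.ServiceClient for the compute service:
+//
+//	allPages, err := extensions.List(computeClient).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	allExtensions, err := extensions.ExtractExtensions(allPages)
+//	if err != nil {
+//		panic(err)
+//	}
+//
+//	for _, ext := range allExtensions {
+//		fmt.Printf("%+v\n", ext)
+//	}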
+package extensions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/doc.go new file mode 100644 index 000000000000..faafe7c313ff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/doc.go @@ -0,0 +1,13 @@ +/* +Package evacuate provides functionality to evacuate servers that have been +provisioned by the OpenStack Compute service from a failed host to a new host. + +Example to Evacuate a Server from a Host + + serverID := "b16ba811-199d-4ffd-8839-ba96c1185a67" + _, err := evacuate.Evacuate(computeClient, serverID, evacuate.EvacuateOpts{}).ExtractAdminPass() + if err != nil { + panic(err) + } +*/ +package evacuate diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/requests.go new file mode 100644 index 000000000000..3ea7af4642fb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/requests.go @@ -0,0 +1,41 @@ +package evacuate + +import ( + "github.com/gophercloud/gophercloud" +) + +// EvacuateOptsBuilder allows extensions to add additional parameters to the +// Evacuate request. +type EvacuateOptsBuilder interface { + ToEvacuateMap() (map[string]interface{}, error) +} + +// EvacuateOpts specifies Evacuate action parameters. +type EvacuateOpts struct { + // The name of the host to which the server is evacuated. + Host string `json:"host,omitempty"` + + // Indicates whether the server is on shared storage. + OnSharedStorage bool `json:"onSharedStorage"` + + // An administrative password to access the evacuated server. + AdminPass string `json:"adminPass,omitempty"` +} + +// ToEvacuateMap constructs a request body from EvacuateOpts. +func (opts EvacuateOpts) ToEvacuateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "evacuate") +} + +// Evacuate will evacuate a failed instance to another host. +func Evacuate(client *gophercloud.ServiceClient, id string, opts EvacuateOptsBuilder) (r EvacuateResult) { + b, err := opts.ToEvacuateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/results.go new file mode 100644 index 000000000000..8342cb43d0c1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/results.go @@ -0,0 +1,23 @@ +package evacuate + +import ( + "github.com/gophercloud/gophercloud" +) + +// EvacuateResult is the response from an Evacuate operation. +// Call its ExtractAdminPass method to retrieve the admin password of the instance. +// The admin password will be an empty string if the cloud is not configured to inject admin passwords.
+type EvacuateResult struct { + gophercloud.Result +} + +func (r EvacuateResult) ExtractAdminPass() (string, error) { + var s struct { + AdminPass string `json:"adminPass"` + } + err := r.ExtractInto(&s) + if err != nil && err.Error() == "EOF" { + return "", nil + } + return s.AdminPass, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/doc.go new file mode 100644 index 000000000000..613ac1d4b67f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/doc.go @@ -0,0 +1,2 @@ +// compute_extensions_evacuate_v2 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/fixtures.go new file mode 100644 index 000000000000..e078d1019d3d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/fixtures.go @@ -0,0 +1,83 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockEvacuateResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "evacuate": { + "adminPass": "MySecretPass", + "host": "derp", + "onSharedStorage": false + } + + } + `) + w.WriteHeader(http.StatusOK) + }) +} + +func mockEvacuateResponseWithHost(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "evacuate": { + "host": "derp", + "onSharedStorage": false + } + + } + `) + w.WriteHeader(http.StatusOK) + }) +} + +func mockEvacuateResponseWithNoOpts(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "evacuate": { + "onSharedStorage": false + } + + } + `) + w.WriteHeader(http.StatusOK) + }) +} + +const EvacuateResponse = ` +{ + "adminPass": "MySecretPass" +} +` + +func mockEvacuateAdminpassResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "evacuate": { + "onSharedStorage": false + } + } + `) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, EvacuateResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/requests_test.go new file mode 100644 index 000000000000..aec03114b806 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/testing/requests_test.go @@ -0,0 +1,60 @@ +package testing + +import ( + "testing" + + 
"github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestEvacuate(t *testing.T) { + const serverID = "b16ba811-199d-4ffd-8839-ba96c1185a67" + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEvacuateResponse(t, serverID) + + _, err := evacuate.Evacuate(client.ServiceClient(), serverID, evacuate.EvacuateOpts{ + Host: "derp", + AdminPass: "MySecretPass", + OnSharedStorage: false, + }).ExtractAdminPass() + th.AssertNoErr(t, err) +} + +func TestEvacuateWithHost(t *testing.T) { + const serverID = "b16ba811-199d-4ffd-8839-ba96c1185a67" + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEvacuateResponseWithHost(t, serverID) + + _, err := evacuate.Evacuate(client.ServiceClient(), serverID, evacuate.EvacuateOpts{ + Host: "derp", + }).ExtractAdminPass() + th.AssertNoErr(t, err) +} + +func TestEvacuateWithNoOpts(t *testing.T) { + const serverID = "b16ba811-199d-4ffd-8839-ba96c1185a67" + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEvacuateResponseWithNoOpts(t, serverID) + + _, err := evacuate.Evacuate(client.ServiceClient(), serverID, evacuate.EvacuateOpts{}).ExtractAdminPass() + th.AssertNoErr(t, err) +} + +func TestEvacuateAdminpassResponse(t *testing.T) { + const serverID = "b16ba811-199d-4ffd-8839-ba96c1185a67" + th.SetupHTTP() + defer th.TeardownHTTP() + + mockEvacuateAdminpassResponse(t, serverID) + + actual, err := evacuate.Evacuate(client.ServiceClient(), serverID, evacuate.EvacuateOpts{}).ExtractAdminPass() + th.CheckEquals(t, "MySecretPass", actual) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/urls.go new file mode 100644 index 000000000000..a8896aea0bae --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/evacuate/urls.go @@ -0,0 +1,9 @@ +package evacuate + +import ( + "github.com/gophercloud/gophercloud" +) + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/doc.go new file mode 100644 index 000000000000..9626f4e18644 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/doc.go @@ -0,0 +1,20 @@ +/* +Package extendedserverattributes provides the ability to extend a +server result with the extended usage information. 
+ +Example to Get extended information: + + type serverAttributesExt struct { + servers.Server + extendedserverattributes.ServerAttributesExt + } + var serverWithAttributesExt serverAttributesExt + + err := servers.Get(computeClient, "d650a0ce-17c3-497d-961a-43c4af80998a").ExtractInto(&serverWithAttributesExt) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", serverWithAttributesExt) +*/ +package extendedserverattributes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/results.go new file mode 100644 index 000000000000..abdf7a8b76db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/results.go @@ -0,0 +1,19 @@ +package extendedserverattributes + +// ServerAttributesExt represents OS-EXT-SRV-ATTR server response fields. +// +// The following fields will be added after implementing full API microversion +// support in Gophercloud: +// +// - OS-EXT-SRV-ATTR:reservation_id +// - OS-EXT-SRV-ATTR:launch_index +// - OS-EXT-SRV-ATTR:hostname +// - OS-EXT-SRV-ATTR:kernel_id +// - OS-EXT-SRV-ATTR:ramdisk_id +// - OS-EXT-SRV-ATTR:root_device_name +// - OS-EXT-SRV-ATTR:user_data +type ServerAttributesExt struct { + Host string `json:"OS-EXT-SRV-ATTR:host"` + InstanceName string `json:"OS-EXT-SRV-ATTR:instance_name"` + HypervisorHostname string `json:"OS-EXT-SRV-ATTR:hypervisor_hostname"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/fixtures.go new file mode 100644 index 000000000000..c2f0b83e80cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/fixtures.go @@ -0,0 +1,28 @@ +package testing + +// ServerWithAttributesExtResult represents a raw server response from the +// Compute API with OS-EXT-SRV-ATTR data. +// Most of the actual fields were deleted from the response.
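+// Only the OS-EXT-SRV-ATTR:host, OS-EXT-SRV-ATTR:instance_name and
+// OS-EXT-SRV-ATTR:hypervisor_hostname keys are decoded by ServerAttributesExt;
+// the other OS-EXT-SRV-ATTR keys in the fixture are ignored during extraction.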
+const ServerWithAttributesExtResult = ` +{ + "server": { + "OS-EXT-SRV-ATTR:user_data": "", + "OS-EXT-SRV-ATTR:instance_name": "instance-00000001", + "OS-EXT-SRV-ATTR:root_device_name": "/dev/sda", + "OS-EXT-SRV-ATTR:hostname": "test00", + "OS-EXT-SRV-ATTR:reservation_id": "r-ky9gim1l", + "OS-EXT-SRV-ATTR:ramdisk_id": "", + "OS-EXT-SRV-ATTR:host": "compute01", + "OS-EXT-SRV-ATTR:kernel_id": "", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "compute01", + "OS-EXT-SRV-ATTR:launch_index": 0, + "created": "2018-07-27T09:15:48Z", + "updated": "2018-07-27T09:15:55Z", + "id": "d650a0ce-17c3-497d-961a-43c4af80998a", + "name": "test_instance", + "status": "ACTIVE", + "user_id": "0f2f3822679e4b3ea073e5d1c6ed5f02", + "tenant_id": "424e7cf0243c468ca61732ba45973b3e" + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/requests_test.go new file mode 100644 index 000000000000..0fa2b3d0d639 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes/testing/requests_test.go @@ -0,0 +1,37 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedserverattributes" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestServerWithUsageExt(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/d650a0ce-17c3-497d-961a-43c4af80998a", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, ServerWithAttributesExtResult) + }) + + type serverAttributesExt struct { + servers.Server + extendedserverattributes.ServerAttributesExt + } + var serverWithAttributesExt serverAttributesExt + err := servers.Get(fake.ServiceClient(), "d650a0ce-17c3-497d-961a-43c4af80998a").ExtractInto(&serverWithAttributesExt) + th.AssertNoErr(t, err) + + th.AssertEquals(t, serverWithAttributesExt.Host, "compute01") + th.AssertEquals(t, serverWithAttributesExt.InstanceName, "instance-00000001") + th.AssertEquals(t, serverWithAttributesExt.HypervisorHostname, "compute01") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus/doc.go new file mode 100644 index 000000000000..33b1e35cd6b4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus/doc.go @@ -0,0 +1,28 @@ +/* +Package extendedstatus provides the ability to extend a server result with +the extended status information. 
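+
+The OS-EXT-STS:power_state value is numeric in the API response; the
+PowerState type in this package maps it to a readable name such as RUNNING
+or SHUTDOWN through its String method.
+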
Example: + + type ServerWithExt struct { + servers.Server + extendedstatus.ServerExtendedStatusExt + } + + var allServers []ServerWithExt + + allPages, err := servers.List(client, nil).AllPages() + if err != nil { + panic(fmt.Sprintf("Unable to retrieve servers: %s", err)) + } + + err = servers.ExtractServersInto(allPages, &allServers) + if err != nil { + panic(fmt.Sprintf("Unable to extract servers: %s", err)) + } + + for _, server := range allServers { + fmt.Println(server.TaskState) + fmt.Println(server.VmState) + fmt.Println(server.PowerState) + } +*/ +package extendedstatus diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus/results.go new file mode 100644 index 000000000000..acfbd3fb242b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus/results.go @@ -0,0 +1,41 @@ +package extendedstatus + +type PowerState int + +type ServerExtendedStatusExt struct { + TaskState string `json:"OS-EXT-STS:task_state"` + VmState string `json:"OS-EXT-STS:vm_state"` + PowerState PowerState `json:"OS-EXT-STS:power_state"` +} + +const ( + NOSTATE = iota + RUNNING + _UNUSED1 + PAUSED + SHUTDOWN + _UNUSED2 + CRASHED + SUSPENDED +) + +func (r PowerState) String() string { + switch r { + case NOSTATE: + return "NOSTATE" + case RUNNING: + return "RUNNING" + case PAUSED: + return "PAUSED" + case SHUTDOWN: + return "SHUTDOWN" + case CRASHED: + return "CRASHED" + case SUSPENDED: + return "SUSPENDED" + case _UNUSED1, _UNUSED2: + return "_UNUSED" + default: + return "N/A" + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go new file mode 100644 index 000000000000..f5dbdbf8b944 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/doc.go @@ -0,0 +1,68 @@ +/* +Package floatingips provides the ability to manage floating ips through the +Nova API. + +This API has been deprecated and will be removed from a future release of the +Nova API service. + +For environments that support this extension, this package can be used +regardless of whether Neutron or nova-network is used as the cloud's network +service.
+ +Example to List Floating IPs + + allPages, err := floatingips.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allFloatingIPs, err := floatingips.ExtractFloatingIPs(allPages) + if err != nil { + panic(err) + } + + for _, fip := range allFloatingIPs { + fmt.Printf("%+v\n", fip) + } + +Example to Create a Floating IP + + createOpts := floatingips.CreateOpts{ + Pool: "nova", + } + + fip, err := floatingips.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Floating IP + + err := floatingips.Delete(computeClient, "floatingip-id").ExtractErr() + if err != nil { + panic(err) + } + +Example to Associate a Floating IP With a Server + + associateOpts := floatingips.AssociateOpts{ + FloatingIP: "10.10.10.2", + } + + err := floatingips.AssociateInstance(computeClient, "server-id", associateOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to Disassociate a Floating IP From a Server + + disassociateOpts := floatingips.DisassociateOpts{ + FloatingIP: "10.10.10.2", + } + + err := floatingips.DisassociateInstance(computeClient, "server-id", disassociateOpts).ExtractErr() + if err != nil { + panic(err) + } +*/ +package floatingips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go new file mode 100644 index 000000000000..a922639dec90 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/requests.go @@ -0,0 +1,114 @@ +package floatingips + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of FloatingIPs. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return FloatingIPPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToFloatingIPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies a Floating IP allocation request. +type CreateOpts struct { + // Pool is the pool of Floating IPs to allocate one from. + Pool string `json:"pool" required:"true"` +} + +// ToFloatingIPCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create requests the creation of a new Floating IP. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFloatingIPCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns data about a previously created Floating IP. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete requests the deletion of a previous allocated Floating IP. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// AssociateOptsBuilder allows extensions to add additional parameters to the +// Associate request. 
+type AssociateOptsBuilder interface { + ToFloatingIPAssociateMap() (map[string]interface{}, error) +} + +// AssociateOpts specifies the required information to associate a Floating IP with an instance +type AssociateOpts struct { + // FloatingIP is the Floating IP to associate with an instance. + FloatingIP string `json:"address" required:"true"` + + // FixedIP is an optional fixed IP address of the server. + FixedIP string `json:"fixed_address,omitempty"` +} + +// ToFloatingIPAssociateMap constructs a request body from AssociateOpts. +func (opts AssociateOpts) ToFloatingIPAssociateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addFloatingIp") +} + +// AssociateInstance pairs an allocated Floating IP with a server. +func AssociateInstance(client *gophercloud.ServiceClient, serverID string, opts AssociateOptsBuilder) (r AssociateResult) { + b, err := opts.ToFloatingIPAssociateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(associateURL(client, serverID), b, nil, nil) + return +} + +// DisassociateOptsBuilder allows extensions to add additional parameters to +// the Disassociate request. +type DisassociateOptsBuilder interface { + ToFloatingIPDisassociateMap() (map[string]interface{}, error) +} + +// DisassociateOpts specifies the required information to disassociate a +// Floating IP with a server. +type DisassociateOpts struct { + FloatingIP string `json:"address" required:"true"` +} + +// ToFloatingIPDisassociateMap constructs a request body from DisassociateOpts. +func (opts DisassociateOpts) ToFloatingIPDisassociateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "removeFloatingIp") +} + +// DisassociateInstance decouples an allocated Floating IP from an instance +func DisassociateInstance(client *gophercloud.ServiceClient, serverID string, opts DisassociateOptsBuilder) (r DisassociateResult) { + b, err := opts.ToFloatingIPDisassociateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(disassociateURL(client, serverID), b, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go new file mode 100644 index 000000000000..da4e9da0e62e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/results.go @@ -0,0 +1,115 @@ +package floatingips + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A FloatingIP is an IP that can be associated with a server. +type FloatingIP struct { + // ID is a unique ID of the Floating IP + ID string `json:"-"` + + // FixedIP is a specific IP on the server to pair the Floating IP with. + FixedIP string `json:"fixed_ip,omitempty"` + + // InstanceID is the ID of the server that is using the Floating IP. + InstanceID string `json:"instance_id"` + + // IP is the actual Floating IP. + IP string `json:"ip"` + + // Pool is the pool of Floating IPs that this Floating IP belongs to. 
+ Pool string `json:"pool"` +} + +func (r *FloatingIP) UnmarshalJSON(b []byte) error { + type tmp FloatingIP + var s struct { + tmp + ID interface{} `json:"id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = FloatingIP(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + return err +} + +// FloatingIPPage stores a single page of FloatingIPs from a List call. +type FloatingIPPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a FloatingIPPage is empty. +func (page FloatingIPPage) IsEmpty() (bool, error) { + va, err := ExtractFloatingIPs(page) + return len(va) == 0, err +} + +// ExtractFloatingIPs interprets a page of results as a slice of FloatingIPs. +func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { + var s struct { + FloatingIPs []FloatingIP `json:"floating_ips"` + } + err := (r.(FloatingIPPage)).ExtractInto(&s) + return s.FloatingIPs, err +} + +// FloatingIPResult is the raw result from a FloatingIP request. +type FloatingIPResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any FloatingIP resource +// response as a FloatingIP struct. +func (r FloatingIPResult) Extract() (*FloatingIP, error) { + var s struct { + FloatingIP *FloatingIP `json:"floating_ip"` + } + err := r.ExtractInto(&s) + return s.FloatingIP, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a FloatingIP. +type CreateResult struct { + FloatingIPResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a FloatingIP. +type GetResult struct { + FloatingIPResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AssociateResult is the response from an Associate operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type AssociateResult struct { + gophercloud.ErrResult +} + +// DisassociateResult is the response from a Disassociate operation. Call its +// ExtractErr method to determine if the call succeeded or failed.
+type DisassociateResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/doc.go new file mode 100644 index 000000000000..82dfbe7fed3c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/doc.go @@ -0,0 +1,2 @@ +// floatingips unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/fixtures.go new file mode 100644 index 000000000000..6866e265defc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/fixtures.go @@ -0,0 +1,223 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "floating_ips": [ + { + "fixed_ip": null, + "id": "1", + "instance_id": null, + "ip": "10.10.10.1", + "pool": "nova" + }, + { + "fixed_ip": "166.78.185.201", + "id": "2", + "instance_id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "ip": "10.10.10.2", + "pool": "nova" + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "floating_ip": { + "fixed_ip": "166.78.185.201", + "id": "2", + "instance_id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "ip": "10.10.10.2", + "pool": "nova" + } +} +` + +// CreateOutput is a sample response to a Post call. +const CreateOutput = ` +{ + "floating_ip": { + "fixed_ip": null, + "id": "1", + "instance_id": null, + "ip": "10.10.10.1", + "pool": "nova" + } +} +` + +// CreateOutputWithNumericID is a sample response to a Post call +// with a legacy nova-network-based numeric ID. +const CreateOutputWithNumericID = ` +{ + "floating_ip": { + "fixed_ip": null, + "id": 1, + "instance_id": null, + "ip": "10.10.10.1", + "pool": "nova" + } +} +` + +// FirstFloatingIP is the first result in ListOutput. +var FirstFloatingIP = floatingips.FloatingIP{ + ID: "1", + IP: "10.10.10.1", + Pool: "nova", +} + +// SecondFloatingIP is the second result in ListOutput. +var SecondFloatingIP = floatingips.FloatingIP{ + FixedIP: "166.78.185.201", + ID: "2", + InstanceID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + IP: "10.10.10.2", + Pool: "nova", +} + +// ExpectedFloatingIPsSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedFloatingIPsSlice = []floatingips.FloatingIP{FirstFloatingIP, SecondFloatingIP} + +// CreatedFloatingIP is the parsed result from CreateOutput. +var CreatedFloatingIP = floatingips.FloatingIP{ + ID: "1", + IP: "10.10.10.1", + Pool: "nova", +} + +// HandleListSuccessfully configures the test server to respond to a List request.
+func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing floating ip +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-floating-ips/2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request +// for a new floating ip +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "pool": "nova" +} +`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateOutput) + }) +} + +// HandleCreateWithNumericIDSuccessfully configures the test server to respond to a Create request +// for a new floating ip +func HandleCreateWithNumericIDSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-floating-ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "pool": "nova" +} +`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateOutputWithNumericID) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request for +// an existing floating ip +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-floating-ips/1", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleAssociateSuccessfully configures the test server to respond to a Post request +// to associate an allocated floating IP +func HandleAssociateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "addFloatingIp": { + "address": "10.10.10.2" + } +} +`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleAssociateFixedSuccessfully configures the test server to respond to a Post request +// to associate an allocated floating IP with a specific fixed IP address +func HandleAssociateFixedSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "addFloatingIp": { + "address": "10.10.10.2", + "fixed_address": "166.78.185.201" + } +} +`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleDisassociateSuccessfully configures the test server to respond to a Post request +// to disassociate an allocated floating IP +func HandleDisassociateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/action", func(w http.ResponseWriter, r
*http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "removeFloatingIp": { + "address": "10.10.10.2" + } +} +`) + + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/requests_test.go new file mode 100644 index 000000000000..2356671e0269 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/testing/requests_test.go @@ -0,0 +1,111 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := floatingips.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := floatingips.ExtractFloatingIPs(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedFloatingIPsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + actual, err := floatingips.Create(client.ServiceClient(), floatingips.CreateOpts{ + Pool: "nova", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedFloatingIP, actual) +} + +func TestCreateWithNumericID(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateWithNumericIDSuccessfully(t) + + actual, err := floatingips.Create(client.ServiceClient(), floatingips.CreateOpts{ + Pool: "nova", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedFloatingIP, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := floatingips.Get(client.ServiceClient(), "2").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &SecondFloatingIP, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := floatingips.Delete(client.ServiceClient(), "1").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAssociate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAssociateSuccessfully(t) + + associateOpts := floatingips.AssociateOpts{ + FloatingIP: "10.10.10.2", + } + + err := floatingips.AssociateInstance(client.ServiceClient(), "4d8c3732-a248-40ed-bebc-539a6ffd25c0", associateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAssociateFixed(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAssociateFixedSuccessfully(t) + + associateOpts := floatingips.AssociateOpts{ + FloatingIP: "10.10.10.2", + FixedIP: "166.78.185.201", + } + + err := floatingips.AssociateInstance(client.ServiceClient(), "4d8c3732-a248-40ed-bebc-539a6ffd25c0", associateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDisassociateInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDisassociateSuccessfully(t) + + disassociateOpts := floatingips.DisassociateOpts{ + FloatingIP: "10.10.10.2", + } + + err := floatingips.DisassociateInstance(client.ServiceClient(), 
"4d8c3732-a248-40ed-bebc-539a6ffd25c0", disassociateOpts).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go new file mode 100644 index 000000000000..4768e5a89769 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/floatingips/urls.go @@ -0,0 +1,37 @@ +package floatingips + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-floating-ips" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} + +func serverURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers/" + serverID + "/action") +} + +func associateURL(c *gophercloud.ServiceClient, serverID string) string { + return serverURL(c, serverID) +} + +func disassociateURL(c *gophercloud.ServiceClient, serverID string) string { + return serverURL(c, serverID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go new file mode 100644 index 000000000000..b8eb699edbe3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/doc.go @@ -0,0 +1,51 @@ +/* +Package hypervisors returns details about list of hypervisors, shows details for a hypervisor +and shows summary statistics for all hypervisors over all compute nodes in the OpenStack cloud. 
+ +Example of Show Hypervisor Details + + hypervisorID := 42 + hypervisor, err := hypervisors.Get(computeClient, hypervisorID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", hypervisor) + +Example of Retrieving Details of All Hypervisors + + allPages, err := hypervisors.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allHypervisors, err := hypervisors.ExtractHypervisors(allPages) + if err != nil { + panic(err) + } + + for _, hypervisor := range allHypervisors { + fmt.Printf("%+v\n", hypervisor) + } + +Example of Show Hypervisor Statistics + + hypervisorsStatistics, err := hypervisors.GetStatistics(computeClient).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", hypervisorsStatistics) + +Example of Show Hypervisor Uptime + + hypervisorID := 42 + hypervisorUptime, err := hypervisors.GetUptime(computeClient, hypervisorID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", hypervisorUptime) + +*/ +package hypervisors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go new file mode 100644 index 000000000000..b6f1c541cb22 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/requests.go @@ -0,0 +1,41 @@ +package hypervisors + +import ( + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List makes a request against the API to list hypervisors. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, hypervisorsListDetailURL(client), func(r pagination.PageResult) pagination.Page { + return HypervisorPage{pagination.SinglePageBase(r)} + }) +} + +// GetStatistics makes a request against the API to get hypervisor statistics. +func GetStatistics(client *gophercloud.ServiceClient) (r StatisticsResult) { + _, r.Err = client.Get(hypervisorsStatisticsURL(client), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get makes a request against the API to get details for a specific hypervisor. +func Get(client *gophercloud.ServiceClient, hypervisorID int) (r HypervisorResult) { + v := strconv.Itoa(hypervisorID) + _, r.Err = client.Get(hypervisorsGetURL(client, v), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// GetUptime makes a request against the API to get uptime for a specific hypervisor. +func GetUptime(client *gophercloud.ServiceClient, hypervisorID int) (r UptimeResult) { + v := strconv.Itoa(hypervisorID) + _, r.Err = client.Get(hypervisorsUptimeURL(client, v), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go new file mode 100644 index 000000000000..7f3fafe1aa13 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/results.go @@ -0,0 +1,290 @@ +package hypervisors + +import ( + "encoding/json" + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Topology represents a CPU Topology.
+type Topology struct { + Sockets int `json:"sockets"` + Cores int `json:"cores"` + Threads int `json:"threads"` +} + +// CPUInfo represents CPU information of the hypervisor. +type CPUInfo struct { + Vendor string `json:"vendor"` + Arch string `json:"arch"` + Model string `json:"model"` + Features []string `json:"features"` + Topology Topology `json:"topology"` +} + +// Service represents a Compute service running on the hypervisor. +type Service struct { + Host string `json:"host"` + ID int `json:"id"` + DisabledReason string `json:"disabled_reason"` +} + +// Hypervisor represents a hypervisor in the OpenStack cloud. +type Hypervisor struct { + // A structure that contains cpu information like arch, model, vendor, + // features and topology. + CPUInfo CPUInfo `json:"-"` + + // The current_workload is the number of tasks the hypervisor is responsible + // for. This will be equal to or greater than the number of active VMs on the + // system (it can be greater when VMs are being deleted and the hypervisor is + // still cleaning up). + CurrentWorkload int `json:"current_workload"` + + // Status of the hypervisor, either "enabled" or "disabled". + Status string `json:"status"` + + // State of the hypervisor, either "up" or "down". + State string `json:"state"` + + // DiskAvailableLeast is the actual free disk on this hypervisor, + // measured in GB. + DiskAvailableLeast int `json:"disk_available_least"` + + // HostIP is the hypervisor's IP address. + HostIP string `json:"host_ip"` + + // FreeDiskGB is the free disk remaining on the hypervisor, measured in GB. + FreeDiskGB int `json:"-"` + + // FreeRamMB is the free RAM in the hypervisor, measured in MB. + FreeRamMB int `json:"free_ram_mb"` + + // HypervisorHostname is the hostname of the hypervisor. + HypervisorHostname string `json:"hypervisor_hostname"` + + // HypervisorType is the type of hypervisor. + HypervisorType string `json:"hypervisor_type"` + + // HypervisorVersion is the version of the hypervisor. + HypervisorVersion int `json:"-"` + + // ID is the unique ID of the hypervisor. + ID int `json:"id"` + + // LocalGB is the disk space in the hypervisor, measured in GB. + LocalGB int `json:"-"` + + // LocalGBUsed is the used disk space of the hypervisor, measured in GB. + LocalGBUsed int `json:"local_gb_used"` + + // MemoryMB is the total memory of the hypervisor, measured in MB. + MemoryMB int `json:"memory_mb"` + + // MemoryMBUsed is the used memory of the hypervisor, measured in MB. + MemoryMBUsed int `json:"memory_mb_used"` + + // RunningVMs is the number of running VMs on the hypervisor. + RunningVMs int `json:"running_vms"` + + // Service is the service this hypervisor represents. + Service Service `json:"service"` + + // VCPUs is the total number of vcpus on the hypervisor. + VCPUs int `json:"vcpus"` + + // VCPUsUsed is the number of used vcpus on the hypervisor. + VCPUsUsed int `json:"vcpus_used"` +} + +func (r *Hypervisor) UnmarshalJSON(b []byte) error { + type tmp Hypervisor + var s struct { + tmp + CPUInfo interface{} `json:"cpu_info"` + HypervisorVersion interface{} `json:"hypervisor_version"` + FreeDiskGB interface{} `json:"free_disk_gb"` + LocalGB interface{} `json:"local_gb"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Hypervisor(s.tmp) + + // Newer versions return the CPU info as the correct type. + // Older versions return the CPU info as a string and need to be + // unmarshalled by the json parser.
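+	// In both cases the value is normalized to raw JSON bytes first and then
+	// decoded into r.CPUInfo below.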
+ var tmpb []byte + + switch t := s.CPUInfo.(type) { + case string: + tmpb = []byte(t) + case map[string]interface{}: + tmpb, err = json.Marshal(t) + if err != nil { + return err + } + default: + return fmt.Errorf("CPUInfo has unexpected type: %T", t) + } + + err = json.Unmarshal(tmpb, &r.CPUInfo) + if err != nil { + return err + } + + // These fields may be returned as a scientific notation, so they need + // converted to int. + switch t := s.HypervisorVersion.(type) { + case int: + r.HypervisorVersion = t + case float64: + r.HypervisorVersion = int(t) + default: + return fmt.Errorf("Hypervisor version of unexpected type") + } + + switch t := s.FreeDiskGB.(type) { + case int: + r.FreeDiskGB = t + case float64: + r.FreeDiskGB = int(t) + default: + return fmt.Errorf("Free disk GB of unexpected type") + } + + switch t := s.LocalGB.(type) { + case int: + r.LocalGB = t + case float64: + r.LocalGB = int(t) + default: + return fmt.Errorf("Local GB of unexpected type") + } + + return nil +} + +// HypervisorPage represents a single page of all Hypervisors from a List +// request. +type HypervisorPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a HypervisorPage is empty. +func (page HypervisorPage) IsEmpty() (bool, error) { + va, err := ExtractHypervisors(page) + return len(va) == 0, err +} + +// ExtractHypervisors interprets a page of results as a slice of Hypervisors. +func ExtractHypervisors(p pagination.Page) ([]Hypervisor, error) { + var h struct { + Hypervisors []Hypervisor `json:"hypervisors"` + } + err := (p.(HypervisorPage)).ExtractInto(&h) + return h.Hypervisors, err +} + +type HypervisorResult struct { + gophercloud.Result +} + +// Extract interprets any HypervisorResult as a Hypervisor, if possible. +func (r HypervisorResult) Extract() (*Hypervisor, error) { + var s struct { + Hypervisor Hypervisor `json:"hypervisor"` + } + err := r.ExtractInto(&s) + return &s.Hypervisor, err +} + +// Statistics represents a summary statistics for all enabled +// hypervisors over all compute nodes in the OpenStack cloud. +type Statistics struct { + // The number of hypervisors. + Count int `json:"count"` + + // The current_workload is the number of tasks the hypervisor is responsible for + CurrentWorkload int `json:"current_workload"` + + // The actual free disk on this hypervisor(in GB). + DiskAvailableLeast int `json:"disk_available_least"` + + // The free disk remaining on this hypervisor(in GB). + FreeDiskGB int `json:"free_disk_gb"` + + // The free RAM in this hypervisor(in MB). + FreeRamMB int `json:"free_ram_mb"` + + // The disk in this hypervisor(in GB). + LocalGB int `json:"local_gb"` + + // The disk used in this hypervisor(in GB). + LocalGBUsed int `json:"local_gb_used"` + + // The memory of this hypervisor(in MB). + MemoryMB int `json:"memory_mb"` + + // The memory used in this hypervisor(in MB). + MemoryMBUsed int `json:"memory_mb_used"` + + // The total number of running vms on all hypervisors. + RunningVMs int `json:"running_vms"` + + // The number of vcpu in this hypervisor. + VCPUs int `json:"vcpus"` + + // The number of vcpu used in this hypervisor. + VCPUsUsed int `json:"vcpus_used"` +} + +type StatisticsResult struct { + gophercloud.Result +} + +// Extract interprets any StatisticsResult as a Statistics, if possible. 
+func (r StatisticsResult) Extract() (*Statistics, error) { + var s struct { + Stats Statistics `json:"hypervisor_statistics"` + } + err := r.ExtractInto(&s) + return &s.Stats, err +} + +// Uptime represents uptime and additional info for a specific hypervisor. +type Uptime struct { + // The hypervisor host name provided by the Nova virt driver. + // For the Ironic driver, it is the Ironic node uuid. + HypervisorHostname string `json:"hypervisor_hostname"` + + // The id of the hypervisor. + ID int `json:"id"` + + // The state of the hypervisor. One of up or down. + State string `json:"state"` + + // The status of the hypervisor. One of enabled or disabled. + Status string `json:"status"` + + // The total uptime of the hypervisor and information about average load. + Uptime string `json:"uptime"` +} + +type UptimeResult struct { + gophercloud.Result +} + +// Extract interprets any UptimeResult as a Uptime, if possible. +func (r UptimeResult) Extract() (*Uptime, error) { + var s struct { + Uptime Uptime `json:"hypervisor"` + } + err := r.ExtractInto(&s) + return &s.Uptime, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/testing/fixtures.go new file mode 100644 index 000000000000..1dc05fb9b0a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/testing/fixtures.go @@ -0,0 +1,265 @@ +package testing + +import ( + "fmt" + "net/http" + "strconv" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// The first hypervisor represents what the specification says (~Newton) +// The second is exactly the same, but what you can get off a real system (~Kilo) +const HypervisorListBody = ` +{ + "hypervisors": [ + { + "cpu_info": { + "arch": "x86_64", + "model": "Nehalem", + "vendor": "Intel", + "features": [ + "pge", + "clflush" + ], + "topology": { + "cores": 1, + "threads": 1, + "sockets": 4 + } + }, + "current_workload": 0, + "status": "enabled", + "state": "up", + "disk_available_least": 0, + "host_ip": "1.1.1.1", + "free_disk_gb": 1028, + "free_ram_mb": 7680, + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 2002000, + "id": 1, + "local_gb": 1028, + "local_gb_used": 0, + "memory_mb": 8192, + "memory_mb_used": 512, + "running_vms": 0, + "service": { + "host": "e6a37ee802d74863ab8b91ade8f12a67", + "id": 2, + "disabled_reason": null + }, + "vcpus": 1, + "vcpus_used": 0 + }, + { + "cpu_info": "{\"arch\": \"x86_64\", \"model\": \"Nehalem\", \"vendor\": \"Intel\", \"features\": [\"pge\", \"clflush\"], \"topology\": {\"cores\": 1, \"threads\": 1, \"sockets\": 4}}", + "current_workload": 0, + "status": "enabled", + "state": "up", + "disk_available_least": 0, + "host_ip": "1.1.1.1", + "free_disk_gb": 1028, + "free_ram_mb": 7680, + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 2.002e+06, + "id": 1, + "local_gb": 1028, + "local_gb_used": 0, + "memory_mb": 8192, + "memory_mb_used": 512, + "running_vms": 0, + "service": { + "host": "e6a37ee802d74863ab8b91ade8f12a67", + "id": 2, + "disabled_reason": null + }, + "vcpus": 1, + "vcpus_used": 0 + } + ] +}` + +const HypervisorsStatisticsBody = ` +{ + "hypervisor_statistics": { + "count": 1, + "current_workload": 0, + 
"disk_available_least": 0, + "free_disk_gb": 1028, + "free_ram_mb": 7680, + "local_gb": 1028, + "local_gb_used": 0, + "memory_mb": 8192, + "memory_mb_used": 512, + "running_vms": 0, + "vcpus": 2, + "vcpus_used": 0 + } +} +` + +const HypervisorGetBody = ` +{ + "hypervisor":{ + "cpu_info":{ + "arch":"x86_64", + "model":"Nehalem", + "vendor":"Intel", + "features":[ + "pge", + "clflush" + ], + "topology":{ + "cores":1, + "threads":1, + "sockets":4 + } + }, + "current_workload":0, + "status":"enabled", + "state":"up", + "disk_available_least":0, + "host_ip":"1.1.1.1", + "free_disk_gb":1028, + "free_ram_mb":7680, + "hypervisor_hostname":"fake-mini", + "hypervisor_type":"fake", + "hypervisor_version":2002000, + "id":1, + "local_gb":1028, + "local_gb_used":0, + "memory_mb":8192, + "memory_mb_used":512, + "running_vms":0, + "service":{ + "host":"e6a37ee802d74863ab8b91ade8f12a67", + "id":2, + "disabled_reason":null + }, + "vcpus":1, + "vcpus_used":0 + } +} +` + +const HypervisorUptimeBody = ` +{ + "hypervisor": { + "hypervisor_hostname": "fake-mini", + "id": 1, + "state": "up", + "status": "enabled", + "uptime": " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14" + } +} +` + +var ( + HypervisorFake = hypervisors.Hypervisor{ + CPUInfo: hypervisors.CPUInfo{ + Arch: "x86_64", + Model: "Nehalem", + Vendor: "Intel", + Features: []string{ + "pge", + "clflush", + }, + Topology: hypervisors.Topology{ + Cores: 1, + Threads: 1, + Sockets: 4, + }, + }, + CurrentWorkload: 0, + Status: "enabled", + State: "up", + DiskAvailableLeast: 0, + HostIP: "1.1.1.1", + FreeDiskGB: 1028, + FreeRamMB: 7680, + HypervisorHostname: "fake-mini", + HypervisorType: "fake", + HypervisorVersion: 2002000, + ID: 1, + LocalGB: 1028, + LocalGBUsed: 0, + MemoryMB: 8192, + MemoryMBUsed: 512, + RunningVMs: 0, + Service: hypervisors.Service{ + Host: "e6a37ee802d74863ab8b91ade8f12a67", + ID: 2, + DisabledReason: "", + }, + VCPUs: 1, + VCPUsUsed: 0, + } + HypervisorsStatisticsExpected = hypervisors.Statistics{ + Count: 1, + CurrentWorkload: 0, + DiskAvailableLeast: 0, + FreeDiskGB: 1028, + FreeRamMB: 7680, + LocalGB: 1028, + LocalGBUsed: 0, + MemoryMB: 8192, + MemoryMBUsed: 512, + RunningVMs: 0, + VCPUs: 2, + VCPUsUsed: 0, + } + HypervisorUptimeExpected = hypervisors.Uptime{ + HypervisorHostname: "fake-mini", + ID: 1, + State: "up", + Status: "enabled", + Uptime: " 08:32:11 up 93 days, 18:25, 12 users, load average: 0.20, 0.12, 0.14", + } +) + +func HandleHypervisorsStatisticsSuccessfully(t *testing.T) { + testhelper.Mux.HandleFunc("/os-hypervisors/statistics", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, HypervisorsStatisticsBody) + }) +} + +func HandleHypervisorListSuccessfully(t *testing.T) { + testhelper.Mux.HandleFunc("/os-hypervisors/detail", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, HypervisorListBody) + }) +} + +func HandleHypervisorGetSuccessfully(t *testing.T) { + v := strconv.Itoa(HypervisorFake.ID) + testhelper.Mux.HandleFunc("/os-hypervisors/"+v, func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, 
HypervisorGetBody) + }) +} + +func HandleHypervisorUptimeSuccessfully(t *testing.T) { + v := strconv.Itoa(HypervisorFake.ID) + testhelper.Mux.HandleFunc("/os-hypervisors/"+v+"/uptime", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, HypervisorUptimeBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/testing/requests_test.go new file mode 100644 index 000000000000..95f9636c0cd1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/testing/requests_test.go @@ -0,0 +1,89 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors" + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListHypervisors(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleHypervisorListSuccessfully(t) + + pages := 0 + err := hypervisors.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := hypervisors.ExtractHypervisors(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 hypervisors, got %d", len(actual)) + } + testhelper.CheckDeepEquals(t, HypervisorFake, actual[0]) + testhelper.CheckDeepEquals(t, HypervisorFake, actual[1]) + + return true, nil + }) + + testhelper.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllHypervisors(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleHypervisorListSuccessfully(t) + + allPages, err := hypervisors.List(client.ServiceClient()).AllPages() + testhelper.AssertNoErr(t, err) + actual, err := hypervisors.ExtractHypervisors(allPages) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, HypervisorFake, actual[0]) + testhelper.CheckDeepEquals(t, HypervisorFake, actual[1]) +} + +func TestHypervisorsStatistics(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleHypervisorsStatisticsSuccessfully(t) + + expected := HypervisorsStatisticsExpected + + actual, err := hypervisors.GetStatistics(client.ServiceClient()).Extract() + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, &expected, actual) +} + +func TestGetHypervisor(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleHypervisorGetSuccessfully(t) + + expected := HypervisorFake + + actual, err := hypervisors.Get(client.ServiceClient(), expected.ID).Extract() + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, &expected, actual) +} + +func TestHypervisorsUptime(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleHypervisorUptimeSuccessfully(t) + + expected := HypervisorUptimeExpected + + actual, err := hypervisors.GetUptime(client.ServiceClient(), HypervisorFake.ID).Extract() + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, &expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/urls.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/urls.go new file mode 100644 index 000000000000..4c18ed43c46f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/hypervisors/urls.go @@ -0,0 +1,19 @@ +package hypervisors + +import "github.com/gophercloud/gophercloud" + +func hypervisorsListDetailURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-hypervisors", "detail") +} + +func hypervisorsStatisticsURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-hypervisors", "statistics") +} + +func hypervisorsGetURL(c *gophercloud.ServiceClient, hypervisorID string) string { + return c.ServiceURL("os-hypervisors", hypervisorID) +} + +func hypervisorsUptimeURL(c *gophercloud.ServiceClient, hypervisorID string) string { + return c.ServiceURL("os-hypervisors", hypervisorID, "uptime") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go new file mode 100644 index 000000000000..24c4607722e4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/doc.go @@ -0,0 +1,71 @@ +/* +Package keypairs provides the ability to manage key pairs as well as create +servers with a specified key pair. + +Example to List Key Pairs + + allPages, err := keypairs.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allKeyPairs, err := keypairs.ExtractKeyPairs(allPages) + if err != nil { + panic(err) + } + + for _, kp := range allKeyPairs { + fmt.Printf("%+v\n", kp) + } + +Example to Create a Key Pair + + createOpts := keypairs.CreateOpts{ + Name: "keypair-name", + } + + keypair, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", keypair) + +Example to Import a Key Pair + + createOpts := keypairs.CreateOpts{ + Name: "keypair-name", + PublicKey: "public-key", + } + + keypair, err := keypairs.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Key Pair + + err := keypairs.Delete(computeClient, "keypair-name").ExtractErr() + if err != nil { + panic(err) + } + +Example to Create a Server With a Key Pair + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := keypairs.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + KeyName: "keypair-name", + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package keypairs diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go new file mode 100644 index 000000000000..4e5e499e3aa4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/requests.go @@ -0,0 +1,86 @@ +package keypairs + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsExt adds a KeyPair option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // KeyName is the name of the key pair. 
+ KeyName string `json:"key_name,omitempty"` +} + +// ToServerCreateMap adds the key_name to the base server creation options. +func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + if opts.KeyName == "" { + return base, nil + } + + serverMap := base["server"].(map[string]interface{}) + serverMap["key_name"] = opts.KeyName + + return base, nil +} + +// List returns a Pager that allows you to iterate over a collection of KeyPairs. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return KeyPairPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToKeyPairCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies KeyPair creation or import parameters. +type CreateOpts struct { + // Name is a friendly name to refer to this KeyPair in other services. + Name string `json:"name" required:"true"` + + // PublicKey [optional] is a pregenerated OpenSSH-formatted public key. + // If provided, this key will be imported and no new key will be created. + PublicKey string `json:"public_key,omitempty"` +} + +// ToKeyPairCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToKeyPairCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "keypair") +} + +// Create requests the creation of a new KeyPair on the server, or the import +// of a pre-existing keypair. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToKeyPairCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns public data about a previously uploaded KeyPair. +func Get(client *gophercloud.ServiceClient, name string) (r GetResult) { + _, r.Err = client.Get(getURL(client, name), &r.Body, nil) + return +} + +// Delete requests the deletion of a previously stored KeyPair from the server. +func Delete(client *gophercloud.ServiceClient, name string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, name), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go new file mode 100644 index 000000000000..2d71034b101a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/results.go @@ -0,0 +1,91 @@ +package keypairs + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// KeyPair is an SSH key known to the OpenStack Cloud that is available to be +// injected into servers. +type KeyPair struct { + // Name is used to refer to this keypair from other services within this + // region. + Name string `json:"name"` + + // Fingerprint is a short sequence of bytes that can be used to authenticate + // or validate a longer public key. + Fingerprint string `json:"fingerprint"` + + // PublicKey is the public key from this pair, in OpenSSH format. + // "ssh-rsa AAAAB3Nz..."
+ PublicKey string `json:"public_key"` + + // PrivateKey is the private key from this pair, in PEM format. + // "-----BEGIN RSA PRIVATE KEY-----\nMIICXA..." + // It is only present if this KeyPair was just returned from a Create call. + PrivateKey string `json:"private_key"` + + // UserID is the user who owns this KeyPair. + UserID string `json:"user_id"` +} + +// KeyPairPage stores a single page of all KeyPair results from a List call. +// Use the ExtractKeyPairs function to convert the results to a slice of +// KeyPairs. +type KeyPairPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a KeyPairPage is empty. +func (page KeyPairPage) IsEmpty() (bool, error) { + ks, err := ExtractKeyPairs(page) + return len(ks) == 0, err +} + +// ExtractKeyPairs interprets a page of results as a slice of KeyPairs. +func ExtractKeyPairs(r pagination.Page) ([]KeyPair, error) { + type pair struct { + KeyPair KeyPair `json:"keypair"` + } + var s struct { + KeyPairs []pair `json:"keypairs"` + } + err := (r.(KeyPairPage)).ExtractInto(&s) + results := make([]KeyPair, len(s.KeyPairs)) + for i, pair := range s.KeyPairs { + results[i] = pair.KeyPair + } + return results, err +} + +type keyPairResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any KeyPair resource response +// as a KeyPair struct. +func (r keyPairResult) Extract() (*KeyPair, error) { + var s struct { + KeyPair *KeyPair `json:"keypair"` + } + err := r.ExtractInto(&s) + return s.KeyPair, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a KeyPair. +type CreateResult struct { + keyPairResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a KeyPair. +type GetResult struct { + keyPairResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/doc.go new file mode 100644 index 000000000000..8d4200983ed8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/doc.go @@ -0,0 +1,2 @@ +// keypairs unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/fixtures.go new file mode 100644 index 000000000000..dc716d8db4dd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/fixtures.go @@ -0,0 +1,170 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. 
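// An illustrative sketch, assuming only the keypairs API defined above: the body
// that Create sends is built by ToKeyPairCreateMap, which nests the options under
// a "keypair" key, matching the shape the handlers below assert with TestJSONRequest.
//
//	opts := keypairs.CreateOpts{Name: "createdkey"}
//	body, err := opts.ToKeyPairCreateMap()
//	if err != nil {
//		panic(err)
//	}
//	// body is map[string]interface{}{"keypair": map[string]interface{}{"name": "createdkey"}}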
+const ListOutput = ` +{ + "keypairs": [ + { + "keypair": { + "fingerprint": "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a", + "name": "firstkey", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n" + } + }, + { + "keypair": { + "fingerprint": "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8", + "name": "secondkey", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n" + } + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "keypair": { + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n", + "name": "firstkey", + "fingerprint": "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a" + } +} +` + +// CreateOutput is a sample response to a Create call. +const CreateOutput = ` +{ + "keypair": { + "fingerprint": "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8", + "name": "createdkey", + "private_key": "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7\nDUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ\n9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5QIDAQAB\nAoGAE5XO1mDhORy9COvsg+kYPUhB1GsCYxh+v88wG7HeFDKBY6KUc/Kxo6yoGn5T\nTjRjekyi2KoDZHz4VlIzyZPwFS4I1bf3oCunVoAKzgLdmnTtvRNMC5jFOGc2vUgP\n9bSyRj3S1R4ClVk2g0IDeagko/jc8zzLEYuIK+fbkds79YECQQDt3vcevgegnkga\ntF4NsDmmBPRkcSHCqrANP/7vFcBQN3czxeYYWX3DK07alu6GhH1Y4sHbdm616uU0\nll7xbDzxAkEAzAtN2IyftNygV2EGiaGgqLyo/tD9+Vui2qCQplqe4jvWh/5Sparl\nOjmKo+uAW+hLrLVMnHzRWxbWU8hirH5FNQJATO+ZxCK4etXXAnQmG41NCAqANWB2\nB+2HJbH2NcQ2QHvAHUm741JGn/KI/aBlo7KEjFRDWUVUB5ji64BbUwCsMQJBAIku\nLGcjnBf/oLk+XSPZC2eGd2Ph5G5qYmH0Q2vkTx+wtTn3DV+eNsDfgMtWAJVJ5t61\ngU1QSXyhLPVlKpnnxuUCQC+xvvWjWtsLaFtAsZywJiqLxQzHts8XLGZptYJ5tLWV\nrtmYtBcJCN48RrgQHry/xWYeA4K/AFQpXfNPgprQ96Q=\n-----END RSA PRIVATE KEY-----\n", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n", + "user_id": "fake" + } +} +` + +// ImportOutput is a sample response to a Create call that provides its own public key. +const ImportOutput = ` +{ + "keypair": { + "fingerprint": "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", + "name": "importedkey", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova", + "user_id": "fake" + } +} +` + +// FirstKeyPair is the first result in ListOutput. 
+var FirstKeyPair = keypairs.KeyPair{ + Name: "firstkey", + Fingerprint: "15:b0:f8:b3:f9:48:63:71:cf:7b:5b:38:6d:44:2d:4a", + PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC+Eo/RZRngaGTkFs7I62ZjsIlO79KklKbMXi8F+KITD4bVQHHn+kV+4gRgkgCRbdoDqoGfpaDFs877DYX9n4z6FrAIZ4PES8TNKhatifpn9NdQYWA+IkU8CuvlEKGuFpKRi/k7JLos/gHi2hy7QUwgtRvcefvD/vgQZOVw/mGR9Q== Generated by Nova\n", +} + +// SecondKeyPair is the second result in ListOutput. +var SecondKeyPair = keypairs.KeyPair{ + Name: "secondkey", + Fingerprint: "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8", + PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n", +} + +// ExpectedKeyPairSlice is the slice of results that should be parsed from ListOutput, in the expected +// order. +var ExpectedKeyPairSlice = []keypairs.KeyPair{FirstKeyPair, SecondKeyPair} + +// CreatedKeyPair is the parsed result from CreatedOutput. +var CreatedKeyPair = keypairs.KeyPair{ + Name: "createdkey", + Fingerprint: "35:9d:d0:c3:4a:80:d3:d8:86:f1:ca:f7:df:c4:f9:d8", + PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7DUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5Q== Generated by Nova\n", + PrivateKey: "-----BEGIN RSA PRIVATE KEY-----\nMIICXAIBAAKBgQC9mC3WZN9UGLxgPBpP7H5jZMc6pKwOoSgre8yun6REFktn/Kz7\nDUt9jaR1UJyRzHxITfCfAIgSxPdGqB/oF1suMyWgu5i0625vavLB5z5kC8Hq3qZJ\n9zJO1poE1kyD+htiTtPWJ88e12xuH2XB/CZN9OpEiF98hAagiOE0EnOS5QIDAQAB\nAoGAE5XO1mDhORy9COvsg+kYPUhB1GsCYxh+v88wG7HeFDKBY6KUc/Kxo6yoGn5T\nTjRjekyi2KoDZHz4VlIzyZPwFS4I1bf3oCunVoAKzgLdmnTtvRNMC5jFOGc2vUgP\n9bSyRj3S1R4ClVk2g0IDeagko/jc8zzLEYuIK+fbkds79YECQQDt3vcevgegnkga\ntF4NsDmmBPRkcSHCqrANP/7vFcBQN3czxeYYWX3DK07alu6GhH1Y4sHbdm616uU0\nll7xbDzxAkEAzAtN2IyftNygV2EGiaGgqLyo/tD9+Vui2qCQplqe4jvWh/5Sparl\nOjmKo+uAW+hLrLVMnHzRWxbWU8hirH5FNQJATO+ZxCK4etXXAnQmG41NCAqANWB2\nB+2HJbH2NcQ2QHvAHUm741JGn/KI/aBlo7KEjFRDWUVUB5ji64BbUwCsMQJBAIku\nLGcjnBf/oLk+XSPZC2eGd2Ph5G5qYmH0Q2vkTx+wtTn3DV+eNsDfgMtWAJVJ5t61\ngU1QSXyhLPVlKpnnxuUCQC+xvvWjWtsLaFtAsZywJiqLxQzHts8XLGZptYJ5tLWV\nrtmYtBcJCN48RrgQHry/xWYeA4K/AFQpXfNPgprQ96Q=\n-----END RSA PRIVATE KEY-----\n", + UserID: "fake", +} + +// ImportedKeyPair is the parsed result from ImportOutput. +var ImportedKeyPair = keypairs.KeyPair{ + Name: "importedkey", + Fingerprint: "1e:2c:9b:56:79:4b:45:77:f9:ca:7a:98:2c:b0:d5:3c", + PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova", + UserID: "fake", +} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request for "firstkey". 
+func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-keypairs/firstkey", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request for a new +// keypair called "createdkey". +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "keypair": { "name": "createdkey" } }`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateOutput) + }) +} + +// HandleImportSuccessfully configures the test server to respond to an Import request for an +// existing keypair called "importedkey". +func HandleImportSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-keypairs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "keypair": { + "name": "importedkey", + "public_key": "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ImportOutput) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request for a +// keypair called "deletedkey". +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-keypairs/deletedkey", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/requests_test.go new file mode 100644 index 000000000000..1e05e6687e33 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/testing/requests_test.go @@ -0,0 +1,72 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := keypairs.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := keypairs.ExtractKeyPairs(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedKeyPairSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + actual, err := keypairs.Create(client.ServiceClient(), keypairs.CreateOpts{ + Name: "createdkey", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedKeyPair, actual) +} + +func TestImport(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + 
HandleImportSuccessfully(t) + + actual, err := keypairs.Create(client.ServiceClient(), keypairs.CreateOpts{ + Name: "importedkey", + PublicKey: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAAgQDx8nkQv/zgGgB4rMYmIf+6A4l6Rr+o/6lHBQdW5aYd44bd8JttDCE/F/pNRr0lRE+PiqSPO8nDPHw0010JeMH9gYgnnFlyY3/OcJ02RhIPyyxYpv9FhY+2YiUkpwFOcLImyrxEsYXpD/0d3ac30bNH6Sw9JD9UZHYcpSxsIbECHw== Generated by Nova", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &ImportedKeyPair, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := keypairs.Get(client.ServiceClient(), "firstkey").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstKeyPair, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := keypairs.Delete(client.ServiceClient(), "deletedkey").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go new file mode 100644 index 000000000000..fec38f36793f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/keypairs/urls.go @@ -0,0 +1,25 @@ +package keypairs + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-keypairs" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, name string) string { + return c.ServiceURL(resourcePath, name) +} + +func deleteURL(c *gophercloud.ServiceClient, name string) string { + return getURL(c, name) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/doc.go new file mode 100644 index 000000000000..c14d537a6ddd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/doc.go @@ -0,0 +1,17 @@ +/* +Package limits shows rate and limit information for a tenant/project. + +Example to Retrieve Limits for a Tenant + + getOpts := limits.GetOpts{ + TenantID: "tenant-id", + } + + limits, err := limits.Get(computeClient, getOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", limits) +*/ +package limits diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/requests.go new file mode 100644 index 000000000000..57573d38c157 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/requests.go @@ -0,0 +1,39 @@ +package limits + +import ( + "github.com/gophercloud/gophercloud" +) + +// GetOptsBuilder allows extensions to add additional parameters to the +// Get request. +type GetOptsBuilder interface { + ToLimitsQuery() (string, error) +} + +// GetOpts enables retrieving limits by a specific tenant. +type GetOpts struct { + // The tenant ID to retrieve limits for. + TenantID string `q:"tenant_id"` +} + +// ToLimitsQuery formats a GetOpts into a query string. 
+func (opts GetOpts) ToLimitsQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Get returns the limits of the currently scoped tenant. +func Get(client *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { + url := getURL(client) + if opts != nil { + query, err := opts.ToLimitsQuery() + if err != nil { + r.Err = err + return + } + url += query + } + + _, r.Err = client.Get(url, &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/results.go new file mode 100644 index 000000000000..8d0564bd2faa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/results.go @@ -0,0 +1,90 @@ +package limits + +import ( + "github.com/gophercloud/gophercloud" +) + +// Limits is a struct that contains the response of a limit query. +type Limits struct { + // Absolute contains the limits and usage information. + Absolute Absolute `json:"absolute"` +} + +// Absolute is a struct that contains the current resource usage and limits +// of a tenant. +type Absolute struct { + // MaxTotalCores is the number of cores available to a tenant. + MaxTotalCores int `json:"maxTotalCores"` + + // MaxImageMeta is the amount of image metadata available to a tenant. + MaxImageMeta int `json:"maxImageMeta"` + + // MaxServerMeta is the amount of server metadata available to a tenant. + MaxServerMeta int `json:"maxServerMeta"` + + // MaxPersonality is the amount of personality/files available to a tenant. + MaxPersonality int `json:"maxPersonality"` + + // MaxPersonalitySize is the personality file size available to a tenant. + MaxPersonalitySize int `json:"maxPersonalitySize"` + + // MaxTotalKeypairs is the total keypairs available to a tenant. + MaxTotalKeypairs int `json:"maxTotalKeypairs"` + + // MaxSecurityGroups is the number of security groups available to a tenant. + MaxSecurityGroups int `json:"maxSecurityGroups"` + + // MaxSecurityGroupRules is the number of security group rules available to + // a tenant. + MaxSecurityGroupRules int `json:"maxSecurityGroupRules"` + + // MaxServerGroups is the number of server groups available to a tenant. + MaxServerGroups int `json:"maxServerGroups"` + + // MaxServerGroupMembers is the number of server group members available + // to a tenant. + MaxServerGroupMembers int `json:"maxServerGroupMembers"` + + // MaxTotalFloatingIps is the number of floating IPs available to a tenant. + MaxTotalFloatingIps int `json:"maxTotalFloatingIps"` + + // MaxTotalInstances is the number of instances/servers available to a tenant. + MaxTotalInstances int `json:"maxTotalInstances"` + + // MaxTotalRAMSize is the total amount of RAM available to a tenant measured + // in megabytes (MB). + MaxTotalRAMSize int `json:"maxTotalRAMSize"` + + // TotalCoresUsed is the number of cores currently in use. + TotalCoresUsed int `json:"totalCoresUsed"` + + // TotalInstancesUsed is the number of instances/servers in use. + TotalInstancesUsed int `json:"totalInstancesUsed"` + + // TotalFloatingIpsUsed is the number of floating IPs in use. + TotalFloatingIpsUsed int `json:"totalFloatingIpsUsed"` + + // TotalRAMUsed is the total RAM/memory in use measured in megabytes (MB). + TotalRAMUsed int `json:"totalRAMUsed"` + + // TotalSecurityGroupsUsed is the total number of security groups in use.
+ TotalSecurityGroupsUsed int `json:"totalSecurityGroupsUsed"` + + // TotalServerGroupsUsed is the total number of server groups in use. + TotalServerGroupsUsed int `json:"totalServerGroupsUsed"` +} + +// Extract interprets a limits result as a Limits. +func (r GetResult) Extract() (*Limits, error) { + var s struct { + Limits *Limits `json:"limits"` + } + err := r.ExtractInto(&s) + return s.Limits, err +} + +// GetResult is the response from a Get operation. Call its Extract +// method to interpret it as an Absolute. +type GetResult struct { + gophercloud.Result +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/testing/fixtures.go new file mode 100644 index 000000000000..d4e52f77838c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/testing/fixtures.go @@ -0,0 +1,80 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "limits": { + "rate": [], + "absolute": { + "maxServerMeta": 128, + "maxPersonality": 5, + "totalServerGroupsUsed": 0, + "maxImageMeta": 128, + "maxPersonalitySize": 10240, + "maxTotalKeypairs": 100, + "maxSecurityGroupRules": 20, + "maxServerGroups": 10, + "totalCoresUsed": 1, + "totalRAMUsed": 2048, + "totalInstancesUsed": 1, + "maxSecurityGroups": 10, + "totalFloatingIpsUsed": 0, + "maxTotalCores": 20, + "maxServerGroupMembers": 10, + "maxTotalFloatingIps": 10, + "totalSecurityGroupsUsed": 1, + "maxTotalInstances": 10, + "maxTotalRAMSize": 51200 + } + } +} +` + +// LimitsResult is the result of the limits in GetOutput. +var LimitsResult = limits.Limits{ + Absolute: limits.Absolute{ + MaxServerMeta: 128, + MaxPersonality: 5, + TotalServerGroupsUsed: 0, + MaxImageMeta: 128, + MaxPersonalitySize: 10240, + MaxTotalKeypairs: 100, + MaxSecurityGroupRules: 20, + MaxServerGroups: 10, + TotalCoresUsed: 1, + TotalRAMUsed: 2048, + TotalInstancesUsed: 1, + MaxSecurityGroups: 10, + TotalFloatingIpsUsed: 0, + MaxTotalCores: 20, + MaxServerGroupMembers: 10, + MaxTotalFloatingIps: 10, + TotalSecurityGroupsUsed: 1, + MaxTotalInstances: 10, + MaxTotalRAMSize: 51200, + }, +} + +const TenantID = "555544443333222211110000ffffeeee" + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for a limit. 
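// A client-side sketch of the Get call this handler serves; computeClient is an
// assumed, illustrative *gophercloud.ServiceClient for the compute service.
//
//	opts := limits.GetOpts{TenantID: TenantID}
//	l, err := limits.Get(computeClient, opts).Extract()
//	if err != nil {
//		panic(err)
//	}
//	// With TenantID set, the request URL becomes /limits?tenant_id=<id>, and
//	// l.Absolute carries the quota and usage values from the response body.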
+func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/limits", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/testing/requests_test.go new file mode 100644 index 000000000000..9c8456c9dddf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/testing/requests_test.go @@ -0,0 +1,23 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + getOpts := limits.GetOpts{ + TenantID: TenantID, + } + + actual, err := limits.Get(client.ServiceClient(), getOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &LimitsResult, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/urls.go new file mode 100644 index 000000000000..edd97e4e0ee9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/limits/urls.go @@ -0,0 +1,11 @@ +package limits + +import ( + "github.com/gophercloud/gophercloud" +) + +const resourcePath = "limits" + +func getURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/doc.go new file mode 100644 index 000000000000..ac51a36f619c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/doc.go @@ -0,0 +1,19 @@ +/* +Package lockunlock provides functionality to lock and unlock servers that +have been provisioned by the OpenStack Compute service. + +Example to Lock and Unlock a Server + + serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e" + + err := lockunlock.Lock(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + + err = lockunlock.Unlock(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package lockunlock diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/requests.go new file mode 100644 index 000000000000..5243d0fba5bd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/requests.go @@ -0,0 +1,19 @@ +package lockunlock + +import "github.com/gophercloud/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +// Lock is the operation responsible for locking a Compute server. 
+func Lock(client *gophercloud.ServiceClient, id string) (r LockResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"lock": nil}, nil, nil) + return +} + +// Unlock is the operation responsible for unlocking a Compute server. +func Unlock(client *gophercloud.ServiceClient, id string) (r UnlockResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"unlock": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/results.go new file mode 100644 index 000000000000..282bc8a0eba2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/results.go @@ -0,0 +1,16 @@ +package lockunlock + +import ( + "github.com/gophercloud/gophercloud" +) + +// LockResult and UnlockResult are the responses from Lock and Unlock +// operations, respectively. Call their ExtractErr methods to determine if the +// requests succeeded or failed. +type LockResult struct { + gophercloud.ErrResult +} + +type UnlockResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/doc.go new file mode 100644 index 000000000000..59cb9be47190 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/doc.go @@ -0,0 +1,2 @@ +// lockunlock unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/fixtures.go new file mode 100644 index 000000000000..ec79b75321bf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/fixtures.go @@ -0,0 +1,27 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockStartServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"lock": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockStopServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"unlock": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/request_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/request_test.go new file mode 100644 index 000000000000..cb2906d27b86 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock/testing/request_test.go @@ -0,0 +1,31 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/lockunlock" + th "github.com/gophercloud/gophercloud/testhelper" + 
"github.com/gophercloud/gophercloud/testhelper/client" +) + +const serverID = "{serverId}" + +func TestLock(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockStartServerResponse(t, serverID) + + err := lockunlock.Lock(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUnlock(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockStopServerResponse(t, serverID) + + err := lockunlock.Unlock(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/doc.go new file mode 100644 index 000000000000..cf3067716d8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/doc.go @@ -0,0 +1,30 @@ +/* +Package migrate provides functionality to migrate servers that have been +provisioned by the OpenStack Compute service. + +Example of Migrate Server (migrate Action) + + serverID := "b16ba811-199d-4ffd-8839-ba96c1185a67" + err := migrate.Migrate(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example of Live-Migrate Server (os-migrateLive Action) + + serverID := "b16ba811-199d-4ffd-8839-ba96c1185a67" + host := "01c0cadef72d47e28a672a76060d492c" + blockMigration := false + + migrationOpts := migrate.LiveMigrateOpts{ + Host: &host, + BlockMigration: &blockMigration, + } + + err := migrate.LiveMigrate(computeClient, serverID, migrationOpts).ExtractErr() + if err != nil { + panic(err) + } + +*/ +package migrate diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/requests.go new file mode 100644 index 000000000000..90ae62e38142 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/requests.go @@ -0,0 +1,50 @@ +package migrate + +import ( + "github.com/gophercloud/gophercloud" +) + +// Migrate will initiate a migration of the instance to another host. +func Migrate(client *gophercloud.ServiceClient, id string) (r MigrateResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"migrate": nil}, nil, nil) + return +} + +// LiveMigrateOptsBuilder allows extensions to add additional parameters to the +// LiveMigrate request. +type LiveMigrateOptsBuilder interface { + ToLiveMigrateMap() (map[string]interface{}, error) +} + +// LiveMigrateOpts specifies parameters of live migrate action. +type LiveMigrateOpts struct { + // The host to which to migrate the server. + // If this parameter is None, the scheduler chooses a host. + Host *string `json:"host"` + + // Set to True to migrate local disks by using block migration. + // If the source or destination host uses shared storage and you set + // this value to True, the live migration fails. + BlockMigration *bool `json:"block_migration,omitempty"` + + // Set to True to enable over commit when the destination host is checked + // for available disk space. Set to False to disable over commit. This setting + // affects only the libvirt virt driver. + DiskOverCommit *bool `json:"disk_over_commit,omitempty"` +} + +// ToLiveMigrateMap constructs a request body from LiveMigrateOpts. 
+func (opts LiveMigrateOpts) ToLiveMigrateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-migrateLive") +} + +// LiveMigrate will initiate a live-migration (without rebooting) of the instance to another host. +func LiveMigrate(client *gophercloud.ServiceClient, id string, opts LiveMigrateOptsBuilder) (r MigrateResult) { + b, err := opts.ToLiveMigrateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/results.go new file mode 100644 index 000000000000..ebccf56ac3dc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/results.go @@ -0,0 +1,11 @@ +package migrate + +import ( + "github.com/gophercloud/gophercloud" +) + +// MigrateResult is the response from a Migrate operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type MigrateResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/doc.go new file mode 100644 index 000000000000..613547573960 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/doc.go @@ -0,0 +1,2 @@ +// migrate unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/fixtures.go new file mode 100644 index 000000000000..1d2f5902c293 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/fixtures.go @@ -0,0 +1,33 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockMigrateResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"migrate": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockLiveMigrateResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "os-migrateLive": { + "host": "01c0cadef72d47e28a672a76060d492c", + "block_migration": false, + "disk_over_commit": true + } + }`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/requests_test.go new file mode 100644 index 000000000000..b6906b7839fb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/testing/requests_test.go @@ -0,0 +1,41 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate" + th 
"github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const serverID = "b16ba811-199d-4ffd-8839-ba96c1185a67" + +func TestMigrate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockMigrateResponse(t, serverID) + + err := migrate.Migrate(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestLiveMigrate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockLiveMigrateResponse(t, serverID) + + host := "01c0cadef72d47e28a672a76060d492c" + blockMigration := false + diskOverCommit := true + + migrationOpts := migrate.LiveMigrateOpts{ + Host: &host, + BlockMigration: &blockMigration, + DiskOverCommit: &diskOverCommit, + } + + err := migrate.LiveMigrate(client.ServiceClient(), serverID, migrationOpts).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/urls.go new file mode 100644 index 000000000000..b8e2a437d7d8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/migrate/urls.go @@ -0,0 +1,9 @@ +package migrate + +import ( + "github.com/gophercloud/gophercloud" +) + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/doc.go new file mode 100644 index 000000000000..f291734e9ebb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/doc.go @@ -0,0 +1,24 @@ +/* +Package networks provides the ability to create and manage networks in cloud +environments using nova-network. + +This package can also be used to retrieve network details of Neutron-based +networks. + +Example to List Networks + + allPages, err := networks.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allNetworks, err := networks.ExtractNetworks(allPages) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v\n", network) + } +*/ +package networks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/requests.go new file mode 100644 index 000000000000..5432a1025c37 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/requests.go @@ -0,0 +1,19 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of Network. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.SinglePageBase(r)} + }) +} + +// Get returns data about a previously created Network. 
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/results.go new file mode 100644 index 000000000000..c36ce678ccbd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/results.go @@ -0,0 +1,133 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A Network represents a network in an OpenStack cloud. +type Network struct { + // Bridge is the bridge that VIFs on this network are connected to. + Bridge string `json:"bridge"` + + // BridgeInterface is the interface that is connected to the Bridge. + BridgeInterface string `json:"bridge_interface"` + + // Broadcast is the broadcast address of the network. + Broadcast string `json:"broadcast"` + + // CIDR is the IPv4 subnet. + CIDR string `json:"cidr"` + + // CIDRv6 is the IPv6 subnet. + CIDRv6 string `json:"cidr_v6"` + + // CreatedAt is when the network was created. + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at,omitempty"` + + // Deleted shows if the network has been deleted. + Deleted bool `json:"deleted"` + + // DeletedAt is the time when the network was deleted. + DeletedAt gophercloud.JSONRFC3339MilliNoZ `json:"deleted_at,omitempty"` + + // DHCPStart is the start of the DHCP address range. + DHCPStart string `json:"dhcp_start"` + + // DNS1 is the first DNS server to use through DHCP. + DNS1 string `json:"dns_1"` + + // DNS2 is the second DNS server to use through DHCP. + DNS2 string `json:"dns_2"` + + // Gateway is the network gateway. + Gateway string `json:"gateway"` + + // Gatewayv6 is the IPv6 network gateway. + Gatewayv6 string `json:"gateway_v6"` + + // Host is the host that the network service is running on. + Host string `json:"host"` + + // ID is the UUID of the network. + ID string `json:"id"` + + // Injected determines if network information is injected into the host. + Injected bool `json:"injected"` + + // Label is the common name that the network has. + Label string `json:"label"` + + // MultiHost is whether multi-host networking is enabled. + MultiHost bool `json:"multi_host"` + + // Netmask is the network netmask. + Netmask string `json:"netmask"` + + // Netmaskv6 is the IPv6 netmask. + Netmaskv6 string `json:"netmask_v6"` + + // Priority is the network interface priority. + Priority int `json:"priority"` + + // ProjectID is the project associated with this network. + ProjectID string `json:"project_id"` + + // RXTXBase configures bandwidth entitlement. + RXTXBase int `json:"rxtx_base"` + + // UpdatedAt is the time when the network was last updated. + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at,omitempty"` + + // VLAN is the VLAN this network runs on. + VLAN int `json:"vlan"` + + // VPNPrivateAddress is the private address of the CloudPipe VPN. + VPNPrivateAddress string `json:"vpn_private_address"` + + // VPNPublicAddress is the public address of the CloudPipe VPN. + VPNPublicAddress string `json:"vpn_public_address"` + + // VPNPublicPort is the port of the CloudPipe VPN. + VPNPublicPort int `json:"vpn_public_port"` +} + +// NetworkPage stores a single page of all Network results from a List call. +type NetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a NetworkPage is empty.
+func (page NetworkPage) IsEmpty() (bool, error) { + va, err := ExtractNetworks(page) + return len(va) == 0, err +} + +// ExtractNetworks interprets a page of results as a slice of Networks. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s struct { + Networks []Network `json:"networks"` + } + err := (r.(NetworkPage)).ExtractInto(&s) + return s.Networks, err +} + +type NetworkResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Network resource +// response as a Network struct. +func (r NetworkResult) Extract() (*Network, error) { + var s struct { + Network *Network `json:"network"` + } + err := r.ExtractInto(&s) + return s.Network, err +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a Network. +type GetResult struct { + NetworkResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/doc.go new file mode 100644 index 000000000000..fc8511de47cd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/doc.go @@ -0,0 +1,2 @@ +// networks unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/fixtures.go new file mode 100644 index 000000000000..e2fa49b48a78 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/fixtures.go @@ -0,0 +1,204 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. 
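// A client-side sketch of the Get call these fixtures exercise; computeClient is
// an assumed compute ServiceClient, and the ID is the one used in GetOutput below.
// Get issues GET /os-networks/{id} and Extract unwraps the "network" object.
//
//	network, err := networks.Get(computeClient, "20c8acc0-f747-4d71-a389-46d078ebf000").Extract()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(network.Label, network.CIDR)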
+const ListOutput = ` +{ + "networks": [ + { + "bridge": "br100", + "bridge_interface": "eth0", + "broadcast": "10.0.0.7", + "cidr": "10.0.0.0/29", + "cidr_v6": null, + "created_at": "2011-08-15T06:19:19.387525", + "deleted": false, + "dhcp_start": "10.0.0.3", + "dns1": null, + "dns2": null, + "gateway": "10.0.0.1", + "gateway_v6": null, + "host": "nsokolov-desktop", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "injected": false, + "label": "mynet_0", + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": "1234", + "rxtx_base": null, + "updated_at": "2011-08-16T09:26:13.048257", + "vlan": 100, + "vpn_private_address": "10.0.0.2", + "vpn_public_address": "127.0.0.1", + "vpn_public_port": 1000 + }, + { + "bridge": "br101", + "bridge_interface": "eth0", + "broadcast": "10.0.0.15", + "cidr": "10.0.0.10/29", + "cidr_v6": null, + "created_at": "2011-08-15T06:19:19.387525", + "deleted": false, + "dhcp_start": "10.0.0.11", + "dns1": null, + "dns2": null, + "gateway": "10.0.0.9", + "gateway_v6": null, + "host": null, + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "injected": false, + "label": "mynet_1", + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "vlan": 101, + "vpn_private_address": "10.0.0.10", + "vpn_public_address": null, + "vpn_public_port": 1001 + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "network": { + "bridge": "br101", + "bridge_interface": "eth0", + "broadcast": "10.0.0.15", + "cidr": "10.0.0.10/29", + "cidr_v6": null, + "created_at": "2011-08-15T06:19:19.387525", + "deleted": false, + "dhcp_start": "10.0.0.11", + "dns1": null, + "dns2": null, + "gateway": "10.0.0.9", + "gateway_v6": null, + "host": null, + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "injected": false, + "label": "mynet_1", + "multi_host": false, + "netmask": "255.255.255.248", + "netmask_v6": null, + "priority": null, + "project_id": null, + "rxtx_base": null, + "vlan": 101, + "vpn_private_address": "10.0.0.10", + "vpn_public_address": null, + "vpn_public_port": 1001 + } +} +` + +// FirstNetwork is the first result in ListOutput. +var nilTime time.Time +var FirstNetwork = networks.Network{ + Bridge: "br100", + BridgeInterface: "eth0", + Broadcast: "10.0.0.7", + CIDR: "10.0.0.0/29", + CIDRv6: "", + CreatedAt: gophercloud.JSONRFC3339MilliNoZ(time.Date(2011, 8, 15, 6, 19, 19, 387525000, time.UTC)), + Deleted: false, + DeletedAt: gophercloud.JSONRFC3339MilliNoZ(nilTime), + DHCPStart: "10.0.0.3", + DNS1: "", + DNS2: "", + Gateway: "10.0.0.1", + Gatewayv6: "", + Host: "nsokolov-desktop", + ID: "20c8acc0-f747-4d71-a389-46d078ebf047", + Injected: false, + Label: "mynet_0", + MultiHost: false, + Netmask: "255.255.255.248", + Netmaskv6: "", + Priority: 0, + ProjectID: "1234", + RXTXBase: 0, + UpdatedAt: gophercloud.JSONRFC3339MilliNoZ(time.Date(2011, 8, 16, 9, 26, 13, 48257000, time.UTC)), + VLAN: 100, + VPNPrivateAddress: "10.0.0.2", + VPNPublicAddress: "127.0.0.1", + VPNPublicPort: 1000, +} + +// SecondNetwork is the second result in ListOutput. 
+var SecondNetwork = networks.Network{ + Bridge: "br101", + BridgeInterface: "eth0", + Broadcast: "10.0.0.15", + CIDR: "10.0.0.10/29", + CIDRv6: "", + CreatedAt: gophercloud.JSONRFC3339MilliNoZ(time.Date(2011, 8, 15, 6, 19, 19, 387525000, time.UTC)), + Deleted: false, + DeletedAt: gophercloud.JSONRFC3339MilliNoZ(nilTime), + DHCPStart: "10.0.0.11", + DNS1: "", + DNS2: "", + Gateway: "10.0.0.9", + Gatewayv6: "", + Host: "", + ID: "20c8acc0-f747-4d71-a389-46d078ebf000", + Injected: false, + Label: "mynet_1", + MultiHost: false, + Netmask: "255.255.255.248", + Netmaskv6: "", + Priority: 0, + ProjectID: "", + RXTXBase: 0, + UpdatedAt: gophercloud.JSONRFC3339MilliNoZ(nilTime), + VLAN: 101, + VPNPrivateAddress: "10.0.0.10", + VPNPublicAddress: "", + VPNPublicPort: 1001, +} + +// ExpectedNetworkSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedNetworkSlice = []networks.Network{FirstNetwork, SecondNetwork} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing network. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-networks/20c8acc0-f747-4d71-a389-46d078ebf000", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/requests_test.go new file mode 100644 index 000000000000..36b5463e4211 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/testing/requests_test.go @@ -0,0 +1,38 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := networks.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := networks.ExtractNetworks(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedNetworkSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := networks.Get(client.ServiceClient(), "20c8acc0-f747-4d71-a389-46d078ebf000").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &SecondNetwork, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/urls.go new file mode 100644 index 000000000000..491bde6f628a --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/networks/urls.go @@ -0,0 +1,17 @@ +package networks + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-networks" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/doc.go new file mode 100644 index 000000000000..b260ca0767b7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/doc.go @@ -0,0 +1,18 @@ +/* +Package pauseunpause provides functionality to pause and unpause servers that +have been provisioned by the OpenStack Compute service. + +Example to Pause and Unpause a Server + + serverID := "32c8baf7-1cdb-4cc2-bc31-c3a55b89f56b" + err := pauseunpause.Pause(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + + err = pauseunpause.Unpause(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package pauseunpause diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/requests.go new file mode 100644 index 000000000000..aeb880343fa5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/requests.go @@ -0,0 +1,19 @@ +package pauseunpause + +import "github.com/gophercloud/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +// Pause is the operation responsible for pausing a Compute server. +func Pause(client *gophercloud.ServiceClient, id string) (r PauseResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"pause": nil}, nil, nil) + return +} + +// Unpause is the operation responsible for unpausing a Compute server. +func Unpause(client *gophercloud.ServiceClient, id string) (r UnpauseResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"unpause": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/results.go new file mode 100644 index 000000000000..3cb91d981abe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/results.go @@ -0,0 +1,15 @@ +package pauseunpause + +import "github.com/gophercloud/gophercloud" + +// PauseResult is the response from a Pause operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type PauseResult struct { + gophercloud.ErrResult +} + +// UnpauseResult is the response from an Unpause operation. Call its ExtractErr +// method to determine if the request succeeded or failed. 
+type UnpauseResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/doc.go new file mode 100644 index 000000000000..09538675098a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/doc.go @@ -0,0 +1,2 @@ +// pauseunpause unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/fixtures.go new file mode 100644 index 000000000000..3723bb33605f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/fixtures.go @@ -0,0 +1,27 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockPauseServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"pause": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockUnpauseServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"unpause": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/requests_test.go new file mode 100644 index 000000000000..0433e8c482ba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause/testing/requests_test.go @@ -0,0 +1,31 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/pauseunpause" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const serverID = "{serverId}" + +func TestPause(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockPauseServerResponse(t, serverID) + + err := pauseunpause.Pause(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUnpause(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUnpauseServerResponse(t, serverID) + + err := pauseunpause.Unpause(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/doc.go new file mode 100644 index 000000000000..04d9887a1432 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/doc.go @@ -0,0 +1,36 @@ +/* +Package quotasets enables retrieving and managing Compute quotas. 
+
+Example to Get a Quota Set
+
+    quotaset, err := quotasets.Get(computeClient, "tenant-id").Extract()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("%+v\n", quotaset)
+
+Example to Get a Detailed Quota Set
+
+    quotaset, err := quotasets.GetDetail(computeClient, "tenant-id").Extract()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("%+v\n", quotaset)
+
+Example to Update a Quota Set
+
+    updateOpts := quotasets.UpdateOpts{
+        FixedIPs: gophercloud.IntToPointer(100),
+        Cores:    gophercloud.IntToPointer(64),
+    }
+
+    quotaset, err := quotasets.Update(computeClient, "tenant-id", updateOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+    fmt.Printf("%+v\n", quotaset)
+*/
+package quotasets
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/requests.go
new file mode 100644
index 000000000000..d5daa7e3fb67
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/requests.go
@@ -0,0 +1,99 @@
+package quotasets
+
+import (
+    "github.com/gophercloud/gophercloud"
+)
+
+// Get returns public data about a previously created QuotaSet.
+func Get(client *gophercloud.ServiceClient, tenantID string) (r GetResult) {
+    _, r.Err = client.Get(getURL(client, tenantID), &r.Body, nil)
+    return
+}
+
+// GetDetail returns detailed public data about a previously created QuotaSet.
+func GetDetail(client *gophercloud.ServiceClient, tenantID string) (r GetDetailResult) {
+    _, r.Err = client.Get(getDetailURL(client, tenantID), &r.Body, nil)
+    return
+}
+
+// Update changes the quotas for the given tenantID and returns the new QuotaSet.
+func Update(client *gophercloud.ServiceClient, tenantID string, opts UpdateOptsBuilder) (r UpdateResult) {
+    reqBody, err := opts.ToComputeQuotaUpdateMap()
+    if err != nil {
+        r.Err = err
+        return
+    }
+
+    _, r.Err = client.Put(updateURL(client, tenantID), reqBody, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}})
+    return
+}
+
+// Delete resets the quotas for the given tenant to their default values.
+func Delete(client *gophercloud.ServiceClient, tenantID string) (r DeleteResult) {
+    _, r.Err = client.Delete(deleteURL(client, tenantID), nil)
+    return
+}
+
+// UpdateOpts specifies the quota values to update for a tenant.
+// All int values are pointers so they can be nil if they are not needed.
+// You can use gophercloud.IntToPointer() for convenience.
+type UpdateOpts struct {
+    // FixedIPs is the number of fixed IPs allotted to this quota_set.
+    FixedIPs *int `json:"fixed_ips,omitempty"`
+
+    // FloatingIPs is the number of floating IPs allotted to this quota_set.
+    FloatingIPs *int `json:"floating_ips,omitempty"`
+
+    // InjectedFileContentBytes is content bytes allowed for each injected file.
+    InjectedFileContentBytes *int `json:"injected_file_content_bytes,omitempty"`
+
+    // InjectedFilePathBytes is allowed bytes for each injected file path.
+    InjectedFilePathBytes *int `json:"injected_file_path_bytes,omitempty"`
+
+    // InjectedFiles is injected files allowed for each project.
+    InjectedFiles *int `json:"injected_files,omitempty"`
+
+    // KeyPairs is number of ssh keypairs.
+    KeyPairs *int `json:"key_pairs,omitempty"`
+
+    // MetadataItems is number of metadata items allowed for each instance.
+    MetadataItems *int `json:"metadata_items,omitempty"`
+
+    // RAM is megabytes allowed for each instance.
+    RAM *int `json:"ram,omitempty"`
+
+    // SecurityGroupRules is the number of rules allowed for each security group.
+    SecurityGroupRules *int `json:"security_group_rules,omitempty"`
+
+    // SecurityGroups is the number of security groups allowed for each project.
+    SecurityGroups *int `json:"security_groups,omitempty"`
+
+    // Cores is number of instance cores allowed for each project.
+    Cores *int `json:"cores,omitempty"`
+
+    // Instances is number of instances allowed for each project.
+    Instances *int `json:"instances,omitempty"`
+
+    // ServerGroups is the number of ServerGroups allowed for the project.
+    ServerGroups *int `json:"server_groups,omitempty"`
+
+    // ServerGroupMembers is the maximum number of members for each ServerGroup.
+    ServerGroupMembers *int `json:"server_group_members,omitempty"`
+
+    // Force will update the quotaset even if the quota has already been used
+    // and the reserved quota exceeds the new quota.
+    Force bool `json:"force,omitempty"`
+}
+
+// UpdateOptsBuilder enables extensions to add parameters to the update request.
+type UpdateOptsBuilder interface {
+    // Extra specific name to prevent collisions with interfaces for other quotas
+    // (e.g. neutron)
+    ToComputeQuotaUpdateMap() (map[string]interface{}, error)
+}
+
+// ToComputeQuotaUpdateMap builds the update options into a serializable
+// format.
+func (opts UpdateOpts) ToComputeQuotaUpdateMap() (map[string]interface{}, error) {
+    return gophercloud.BuildRequestBody(opts, "quota_set")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/results.go
new file mode 100644
index 000000000000..e38868a63732
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/results.go
@@ -0,0 +1,194 @@
+package quotasets
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+// QuotaSet is a set of operational limits that allow for control of compute
+// usage.
+type QuotaSet struct {
+    // ID is the tenant ID associated with this QuotaSet.
+    ID string `json:"id"`
+
+    // FixedIPs is the number of fixed IPs allotted to this QuotaSet.
+    FixedIPs int `json:"fixed_ips"`
+
+    // FloatingIPs is the number of floating IPs allotted to this QuotaSet.
+    FloatingIPs int `json:"floating_ips"`
+
+    // InjectedFileContentBytes is the allowed bytes for each injected file.
+    InjectedFileContentBytes int `json:"injected_file_content_bytes"`
+
+    // InjectedFilePathBytes is allowed bytes for each injected file path.
+    InjectedFilePathBytes int `json:"injected_file_path_bytes"`
+
+    // InjectedFiles is the number of injected files allowed for each project.
+    InjectedFiles int `json:"injected_files"`
+
+    // KeyPairs is number of ssh keypairs.
+    KeyPairs int `json:"key_pairs"`
+
+    // MetadataItems is number of metadata items allowed for each instance.
+    MetadataItems int `json:"metadata_items"`
+
+    // RAM is megabytes allowed for each instance.
+    RAM int `json:"ram"`
+
+    // SecurityGroupRules is number of security group rules allowed for each
+    // security group.
+    SecurityGroupRules int `json:"security_group_rules"`
+
+    // SecurityGroups is the number of security groups allowed for each project.
+    SecurityGroups int `json:"security_groups"`
+
+    // Cores is number of instance cores allowed for each project.
+    Cores int `json:"cores"`
+
+    // Instances is number of instances allowed for each project.
+    Instances int `json:"instances"`
+
+    // ServerGroups is the number of ServerGroups allowed for the project.
+    ServerGroups int `json:"server_groups"`
+
+    // ServerGroupMembers is the number of members for each ServerGroup.
+    ServerGroupMembers int `json:"server_group_members"`
+}
+
+// QuotaDetailSet represents details of both operational limits of compute
+// resources and the current usage of those resources.
+type QuotaDetailSet struct {
+    // ID is the tenant ID associated with this QuotaDetailSet.
+    ID string `json:"id"`
+
+    // FixedIPs is the number of fixed IPs allotted to this QuotaDetailSet.
+    FixedIPs QuotaDetail `json:"fixed_ips"`
+
+    // FloatingIPs is the number of floating IPs allotted to this QuotaDetailSet.
+    FloatingIPs QuotaDetail `json:"floating_ips"`
+
+    // InjectedFileContentBytes is the allowed bytes for each injected file.
+    InjectedFileContentBytes QuotaDetail `json:"injected_file_content_bytes"`
+
+    // InjectedFilePathBytes is allowed bytes for each injected file path.
+    InjectedFilePathBytes QuotaDetail `json:"injected_file_path_bytes"`
+
+    // InjectedFiles is the number of injected files allowed for each project.
+    InjectedFiles QuotaDetail `json:"injected_files"`
+
+    // KeyPairs is number of ssh keypairs.
+    KeyPairs QuotaDetail `json:"key_pairs"`
+
+    // MetadataItems is number of metadata items allowed for each instance.
+    MetadataItems QuotaDetail `json:"metadata_items"`
+
+    // RAM is megabytes allowed for each instance.
+    RAM QuotaDetail `json:"ram"`
+
+    // SecurityGroupRules is number of security group rules allowed for each
+    // security group.
+    SecurityGroupRules QuotaDetail `json:"security_group_rules"`
+
+    // SecurityGroups is the number of security groups allowed for each project.
+    SecurityGroups QuotaDetail `json:"security_groups"`
+
+    // Cores is number of instance cores allowed for each project.
+    Cores QuotaDetail `json:"cores"`
+
+    // Instances is number of instances allowed for each project.
+    Instances QuotaDetail `json:"instances"`
+
+    // ServerGroups is the number of ServerGroups allowed for the project.
+    ServerGroups QuotaDetail `json:"server_groups"`
+
+    // ServerGroupMembers is the number of members for each ServerGroup.
+    ServerGroupMembers QuotaDetail `json:"server_group_members"`
+}
+
+// QuotaDetail is a set of details about a single operational limit that allows
+// for control of compute usage.
+type QuotaDetail struct {
+    // InUse is the current number of provisioned/allocated resources of the
+    // given type.
+    InUse int `json:"in_use"`
+
+    // Reserved is a transitional state when a claim against quota has been made
+    // but the resource is not yet fully online.
+    Reserved int `json:"reserved"`
+
+    // Limit is the maximum number of a given resource that can be
+    // allocated/provisioned. This is what "quota" usually refers to.
+    Limit int `json:"limit"`
+}
+
+// QuotaSetPage stores a single page of all QuotaSet results from a List call.
+type QuotaSetPage struct {
+    pagination.SinglePageBase
+}
+
+// IsEmpty determines whether or not a QuotaSetPage is empty.
+func (page QuotaSetPage) IsEmpty() (bool, error) {
+    ks, err := ExtractQuotaSets(page)
+    return len(ks) == 0, err
+}
+
+// ExtractQuotaSets interprets a page of results as a slice of QuotaSets.
+func ExtractQuotaSets(r pagination.Page) ([]QuotaSet, error) {
+    var s struct {
+        QuotaSets []QuotaSet `json:"quotas"`
+    }
+    err := (r.(QuotaSetPage)).ExtractInto(&s)
+    return s.QuotaSets, err
+}
+
+type quotaResult struct {
+    gophercloud.Result
+}
+
+// Extract is a method that attempts to interpret any QuotaSet resource response
+// as a QuotaSet struct.
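+//
+// A minimal usage sketch (an authenticated compute ServiceClient is assumed):
+//
+//    qs, err := quotasets.Get(computeClient, "tenant-id").Extract()
+//    if err == nil {
+//        fmt.Println(qs.Cores)
+//    }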
+func (r quotaResult) Extract() (*QuotaSet, error) { + var s struct { + QuotaSet *QuotaSet `json:"quota_set"` + } + err := r.ExtractInto(&s) + return s.QuotaSet, err +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a QuotaSet. +type GetResult struct { + quotaResult +} + +// UpdateResult is the response from a Update operation. Call its Extract method +// to interpret it as a QuotaSet. +type UpdateResult struct { + quotaResult +} + +// DeleteResult is the response from a Delete operation. Call its Extract method +// to interpret it as a QuotaSet. +type DeleteResult struct { + quotaResult +} + +type quotaDetailResult struct { + gophercloud.Result +} + +// GetDetailResult is the response from a Get operation. Call its Extract +// method to interpret it as a QuotaSet. +type GetDetailResult struct { + quotaDetailResult +} + +// Extract is a method that attempts to interpret any QuotaDetailSet +// resource response as a set of QuotaDetailSet structs. +func (r quotaDetailResult) Extract() (QuotaDetailSet, error) { + var s struct { + QuotaData QuotaDetailSet `json:"quota_set"` + } + err := r.ExtractInto(&s) + return s.QuotaData, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/doc.go new file mode 100644 index 000000000000..30d864eb95f8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/doc.go @@ -0,0 +1,2 @@ +// quotasets unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/fixtures.go new file mode 100644 index 000000000000..2915d3103728 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/fixtures.go @@ -0,0 +1,215 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "quota_set" : { + "instances" : 25, + "security_groups" : 10, + "security_group_rules" : 20, + "cores" : 200, + "injected_file_content_bytes" : 10240, + "injected_files" : 5, + "metadata_items" : 128, + "ram" : 200000, + "key_pairs" : 10, + "injected_file_path_bytes" : 255, + "server_groups" : 2, + "server_group_members" : 3 + } +} +` + +// GetDetailsOutput is a sample response to a Get call with the detailed option. 
+const GetDetailsOutput = ` +{ + "quota_set" : { + "id": "555544443333222211110000ffffeeee", + "instances" : { + "in_use": 0, + "limit": 25, + "reserved": 0 + }, + "security_groups" : { + "in_use": 0, + "limit": 10, + "reserved": 0 + }, + "security_group_rules" : { + "in_use": 0, + "limit": 20, + "reserved": 0 + }, + "cores" : { + "in_use": 0, + "limit": 200, + "reserved": 0 + }, + "injected_file_content_bytes" : { + "in_use": 0, + "limit": 10240, + "reserved": 0 + }, + "injected_files" : { + "in_use": 0, + "limit": 5, + "reserved": 0 + }, + "metadata_items" : { + "in_use": 0, + "limit": 128, + "reserved": 0 + }, + "ram" : { + "in_use": 0, + "limit": 200000, + "reserved": 0 + }, + "key_pairs" : { + "in_use": 0, + "limit": 10, + "reserved": 0 + }, + "injected_file_path_bytes" : { + "in_use": 0, + "limit": 255, + "reserved": 0 + }, + "server_groups" : { + "in_use": 0, + "limit": 2, + "reserved": 0 + }, + "server_group_members" : { + "in_use": 0, + "limit": 3, + "reserved": 0 + } + } +} +` +const FirstTenantID = "555544443333222211110000ffffeeee" + +// FirstQuotaSet is the first result in ListOutput. +var FirstQuotaSet = quotasets.QuotaSet{ + FixedIPs: 0, + FloatingIPs: 0, + InjectedFileContentBytes: 10240, + InjectedFilePathBytes: 255, + InjectedFiles: 5, + KeyPairs: 10, + MetadataItems: 128, + RAM: 200000, + SecurityGroupRules: 20, + SecurityGroups: 10, + Cores: 200, + Instances: 25, + ServerGroups: 2, + ServerGroupMembers: 3, +} + +// FirstQuotaDetailsSet is the first result in ListOutput. +var FirstQuotaDetailsSet = quotasets.QuotaDetailSet{ + ID: FirstTenantID, + InjectedFileContentBytes: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 10240}, + InjectedFilePathBytes: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 255}, + InjectedFiles: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 5}, + KeyPairs: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 10}, + MetadataItems: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 128}, + RAM: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 200000}, + SecurityGroupRules: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 20}, + SecurityGroups: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 10}, + Cores: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 200}, + Instances: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 25}, + ServerGroups: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 2}, + ServerGroupMembers: quotasets.QuotaDetail{InUse: 0, Reserved: 0, Limit: 3}, +} + +//The expected update Body. Is also returned by PUT request +const UpdateOutput = `{"quota_set":{"cores":200,"fixed_ips":0,"floating_ips":0,"injected_file_content_bytes":10240,"injected_file_path_bytes":255,"injected_files":5,"instances":25,"key_pairs":10,"metadata_items":128,"ram":200000,"security_group_rules":20,"security_groups":10,"server_groups":2,"server_group_members":3}}` + +//The expected partialupdate Body. 
Is also returned by PUT request +const PartialUpdateBody = `{"quota_set":{"cores":200, "force":true}}` + +//Result of Quota-update +var UpdatedQuotaSet = quotasets.UpdateOpts{ + FixedIPs: gophercloud.IntToPointer(0), + FloatingIPs: gophercloud.IntToPointer(0), + InjectedFileContentBytes: gophercloud.IntToPointer(10240), + InjectedFilePathBytes: gophercloud.IntToPointer(255), + InjectedFiles: gophercloud.IntToPointer(5), + KeyPairs: gophercloud.IntToPointer(10), + MetadataItems: gophercloud.IntToPointer(128), + RAM: gophercloud.IntToPointer(200000), + SecurityGroupRules: gophercloud.IntToPointer(20), + SecurityGroups: gophercloud.IntToPointer(10), + Cores: gophercloud.IntToPointer(200), + Instances: gophercloud.IntToPointer(25), + ServerGroups: gophercloud.IntToPointer(2), + ServerGroupMembers: gophercloud.IntToPointer(3), +} + +// HandleGetSuccessfully configures the test server to respond to a Get request for sample tenant +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-quota-sets/"+FirstTenantID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleGetDetailSuccessfully configures the test server to respond to a Get Details request for sample tenant +func HandleGetDetailSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-quota-sets/"+FirstTenantID+"/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetDetailsOutput) + }) +} + +// HandlePutSuccessfully configures the test server to respond to a Put request for sample tenant +func HandlePutSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-quota-sets/"+FirstTenantID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateOutput) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, UpdateOutput) + }) +} + +// HandlePartialPutSuccessfully configures the test server to respond to a Put request for sample tenant that only containes specific values +func HandlePartialPutSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-quota-sets/"+FirstTenantID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, PartialUpdateBody) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, UpdateOutput) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request for sample tenant +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-quota-sets/"+FirstTenantID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestBody(t, r, "") + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(202) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/requests_test.go new file mode 100644 index 000000000000..ccac51b364df --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/testing/requests_test.go @@ 
-0,0 +1,73 @@ +package testing + +import ( + "errors" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + actual, err := quotasets.Get(client.ServiceClient(), FirstTenantID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstQuotaSet, actual) +} + +func TestGetDetail(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetDetailSuccessfully(t) + actual, err := quotasets.GetDetail(client.ServiceClient(), FirstTenantID).Extract() + th.CheckDeepEquals(t, FirstQuotaDetailsSet, actual) + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePutSuccessfully(t) + actual, err := quotasets.Update(client.ServiceClient(), FirstTenantID, UpdatedQuotaSet).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstQuotaSet, actual) +} + +func TestPartialUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePartialPutSuccessfully(t) + opts := quotasets.UpdateOpts{Cores: gophercloud.IntToPointer(200), Force: true} + actual, err := quotasets.Update(client.ServiceClient(), FirstTenantID, opts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstQuotaSet, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + _, err := quotasets.Delete(client.ServiceClient(), FirstTenantID).Extract() + th.AssertNoErr(t, err) +} + +type ErrorUpdateOpts quotasets.UpdateOpts + +func (opts ErrorUpdateOpts) ToComputeQuotaUpdateMap() (map[string]interface{}, error) { + return nil, errors.New("This is an error") +} + +func TestErrorInToComputeQuotaUpdateMap(t *testing.T) { + opts := &ErrorUpdateOpts{} + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePutSuccessfully(t) + _, err := quotasets.Update(client.ServiceClient(), FirstTenantID, opts).Extract() + if err == nil { + t.Fatal("Error handling failed") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/urls.go new file mode 100644 index 000000000000..37e50215b51d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/quotasets/urls.go @@ -0,0 +1,25 @@ +package quotasets + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-quota-sets" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func getURL(c *gophercloud.ServiceClient, tenantID string) string { + return c.ServiceURL(resourcePath, tenantID) +} + +func getDetailURL(c *gophercloud.ServiceClient, tenantID string) string { + return c.ServiceURL(resourcePath, tenantID, "detail") +} + +func updateURL(c *gophercloud.ServiceClient, tenantID string) string { + return getURL(c, tenantID) +} + +func deleteURL(c *gophercloud.ServiceClient, tenantID string) string { + return getURL(c, tenantID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/doc.go new file mode 100644 index 000000000000..2081018cdb03 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/doc.go @@ -0,0 +1,28 @@ +/* +Package rescueunrescue provides the ability to place a server into rescue mode +and to return it back. + +Example to Rescue a server + + rescueOpts := rescueunrescue.RescueOpts{ + AdminPass: "aUPtawPzE9NU", + RescueImageRef: "115e5c5b-72f0-4a0a-9067-60706545248c", + } + serverID := "3f54d05f-3430-4d80-aa07-63e6af9e2488" + + adminPass, err := rescueunrescue.Rescue(computeClient, serverID, rescueOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("adminPass of the rescued server %s: %s\n", serverID, adminPass) + +Example to Unrescue a server + + serverID := "3f54d05f-3430-4d80-aa07-63e6af9e2488" + + if err := rescueunrescue.Unrescue(computeClient, serverID).ExtractErr(); err != nil { + panic(err) + } +*/ +package rescueunrescue diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/requests.go new file mode 100644 index 000000000000..61a23aad668b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/requests.go @@ -0,0 +1,48 @@ +package rescueunrescue + +import "github.com/gophercloud/gophercloud" + +// RescueOptsBuilder is an interface that allows extensions to override the +// default structure of a Rescue request. +type RescueOptsBuilder interface { + ToServerRescueMap() (map[string]interface{}, error) +} + +// RescueOpts represents the configuration options used to control a Rescue +// option. +type RescueOpts struct { + // AdminPass is the desired administrative password for the instance in + // RESCUE mode. + // If it's left blank, the server will generate a password. + AdminPass string `json:"adminPass,omitempty"` + + // RescueImageRef contains reference on an image that needs to be used as + // rescue image. + // If it's left blank, the server will be rescued with the default image. + RescueImageRef string `json:"rescue_image_ref,omitempty"` +} + +// ToServerRescueMap formats a RescueOpts as a map that can be used as a JSON +// request body for the Rescue request. +func (opts RescueOpts) ToServerRescueMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rescue") +} + +// Rescue instructs the provider to place the server into RESCUE mode. +func Rescue(client *gophercloud.ServiceClient, id string, opts RescueOptsBuilder) (r RescueResult) { + b, err := opts.ToServerRescueMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Unrescue instructs the provider to return the server from RESCUE mode. 
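+//
+// A minimal call looks like the following (client and server ID are assumed):
+//
+//    err := rescueunrescue.Unrescue(computeClient, "server-id").ExtractErr()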
+func Unrescue(client *gophercloud.ServiceClient, id string) (r UnrescueResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"unrescue": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/results.go new file mode 100644 index 000000000000..1966b15c46ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/results.go @@ -0,0 +1,28 @@ +package rescueunrescue + +import "github.com/gophercloud/gophercloud" + +type commonResult struct { + gophercloud.Result +} + +// RescueResult is the response from a Rescue operation. Call its Extract +// method to retrieve adminPass for a rescued server. +type RescueResult struct { + commonResult +} + +// UnrescueResult is the response from an UnRescue operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type UnrescueResult struct { + gophercloud.ErrResult +} + +// Extract interprets any RescueResult as an AdminPass, if possible. +func (r RescueResult) Extract() (string, error) { + var s struct { + AdminPass string `json:"adminPass"` + } + err := r.ExtractInto(&s) + return s.AdminPass, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/fixtures.go new file mode 100644 index 000000000000..c822193d6a2d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/fixtures.go @@ -0,0 +1,25 @@ +package testing + +// RescueRequest represents request to rescue a server. +const RescueRequest = ` +{ + "rescue": { + "adminPass": "aUPtawPzE9NU", + "rescue_image_ref": "115e5c5b-72f0-4a0a-9067-60706545248c" + } +} +` + +// RescueResult represents a raw server response to a RescueRequest. +const RescueResult = ` +{ + "adminPass": "aUPtawPzE9NU" +} +` + +// UnrescueRequest represents request to unrescue a server. 
+const UnrescueRequest = ` +{ + "unrescue": null +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/requests_test.go new file mode 100644 index 000000000000..fda9023e2fa9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/testing/requests_test.go @@ -0,0 +1,49 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestRescue(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/3f54d05f-3430-4d80-aa07-63e6af9e2488/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, RescueRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, RescueResult) + }) + + s, err := rescueunrescue.Rescue(fake.ServiceClient(), "3f54d05f-3430-4d80-aa07-63e6af9e2488", rescueunrescue.RescueOpts{ + AdminPass: "aUPtawPzE9NU", + RescueImageRef: "115e5c5b-72f0-4a0a-9067-60706545248c", + }).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "aUPtawPzE9NU", s) +} + +func TestUnrescue(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/3f54d05f-3430-4d80-aa07-63e6af9e2488/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, UnrescueRequest) + + w.WriteHeader(http.StatusAccepted) + }) + + err := rescueunrescue.Unrescue(fake.ServiceClient(), "3f54d05f-3430-4d80-aa07-63e6af9e2488").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/urls.go new file mode 100644 index 000000000000..fa5cd3735ff5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/rescueunrescue/urls.go @@ -0,0 +1,7 @@ +package rescueunrescue + +import "github.com/gophercloud/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/doc.go new file mode 100644 index 000000000000..00f004b02243 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/doc.go @@ -0,0 +1,13 @@ +/* +Package resetstate provides functionality to reset the state of a server that has +been provisioned by the OpenStack Compute service. 
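+
+The target state may be either StateActive ("active") or StateError ("error").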
+
+Example to Reset a Server
+
+    serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e"
+    err := resetstate.ResetState(client, serverID, resetstate.StateActive).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+*/
+package resetstate
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/requests.go
new file mode 100644
index 000000000000..628e44aa4751
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/requests.go
@@ -0,0 +1,23 @@
+package resetstate
+
+import (
+    "github.com/gophercloud/gophercloud"
+)
+
+// ServerState refers to the states usable in the ResetState action.
+type ServerState string
+
+const (
+    // StateActive returns the state of the server as active.
+    StateActive ServerState = "active"
+
+    // StateError returns the state of the server as error.
+    StateError ServerState = "error"
+)
+
+// ResetState will reset the state of a server.
+func ResetState(client *gophercloud.ServiceClient, id string, state ServerState) (r ResetResult) {
+    stateMap := map[string]interface{}{"state": state}
+    _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-resetState": stateMap}, nil, nil)
+    return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/results.go
new file mode 100644
index 000000000000..ddeb3519a190
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/results.go
@@ -0,0 +1,11 @@
+package resetstate
+
+import (
+    "github.com/gophercloud/gophercloud"
+)
+
+// ResetResult is the response of a ResetState operation. Call its ExtractErr
+// method to determine if the request succeeded or failed.
+type ResetResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/fixtures.go new file mode 100644 index 000000000000..857a8b2127dc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/fixtures.go @@ -0,0 +1,19 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockResetStateResponse(t *testing.T, id string, state string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, fmt.Sprintf(`{"os-resetState": {"state": "%s"}}`, state)) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/requests_test.go new file mode 100644 index 000000000000..491a7ee1fb0f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/testing/requests_test.go @@ -0,0 +1,21 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const serverID = "b16ba811-199d-4ffd-8839-ba96c1185a67" + +func TestResetState(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockResetStateResponse(t, serverID, "active") + + err := resetstate.ResetState(client.ServiceClient(), serverID, "active").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/urls.go new file mode 100644 index 000000000000..c6da6d9304b1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/resetstate/urls.go @@ -0,0 +1,9 @@ +package resetstate + +import ( + "github.com/gophercloud/gophercloud" +) + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go new file mode 100644 index 000000000000..2d9d3acdecad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/doc.go @@ -0,0 +1,76 @@ +/* +Package schedulerhints extends the server create request with the ability to +specify additional parameters which determine where the 
server will be +created in the OpenStack cloud. + +Example to Add a Server to a Server Group + + schedulerHints := schedulerhints.SchedulerHints{ + Group: "servergroup-uuid", + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := schedulerhints.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + SchedulerHints: schedulerHints, + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Place Server B on a Different Host than Server A + + schedulerHints := schedulerhints.SchedulerHints{ + DifferentHost: []string{ + "server-a-uuid", + } + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_b", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := schedulerhints.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + SchedulerHints: schedulerHints, + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Place Server B on the Same Host as Server A + + schedulerHints := schedulerhints.SchedulerHints{ + SameHost: []string{ + "server-a-uuid", + } + } + + serverCreateOpts := servers.CreateOpts{ + Name: "server_b", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + createOpts := schedulerhints.CreateOptsExt{ + CreateOptsBuilder: serverCreateOpts, + SchedulerHints: schedulerHints, + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package schedulerhints diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go new file mode 100644 index 000000000000..3fabeddef3b1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/requests.go @@ -0,0 +1,164 @@ +package schedulerhints + +import ( + "net" + "regexp" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" +) + +// SchedulerHints represents a set of scheduling hints that are passed to the +// OpenStack scheduler. +type SchedulerHints struct { + // Group specifies a Server Group to place the instance in. + Group string + + // DifferentHost will place the instance on a compute node that does not + // host the given instances. + DifferentHost []string + + // SameHost will place the instance on a compute node that hosts the given + // instances. + SameHost []string + + // Query is a conditional statement that results in compute nodes able to + // host the instance. + Query []interface{} + + // TargetCell specifies a cell name where the instance will be placed. + TargetCell string `json:"target_cell,omitempty"` + + // BuildNearHostIP specifies a subnet of compute nodes to host the instance. + BuildNearHostIP string + + // AdditionalProperies are arbitrary key/values that are not validated by nova. + AdditionalProperties map[string]interface{} +} + +// CreateOptsBuilder builds the scheduler hints into a serializable format. +type CreateOptsBuilder interface { + ToServerSchedulerHintsCreateMap() (map[string]interface{}, error) +} + +// ToServerSchedulerHintsMap builds the scheduler hints into a serializable format. 
+func (opts SchedulerHints) ToServerSchedulerHintsCreateMap() (map[string]interface{}, error) { + sh := make(map[string]interface{}) + + uuidRegex, _ := regexp.Compile("^[a-z0-9]{8}-[a-z0-9]{4}-[1-5][a-z0-9]{3}-[a-z0-9]{4}-[a-z0-9]{12}$") + + if opts.Group != "" { + if !uuidRegex.MatchString(opts.Group) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Group" + err.Value = opts.Group + err.Info = "Group must be a UUID" + return nil, err + } + sh["group"] = opts.Group + } + + if len(opts.DifferentHost) > 0 { + for _, diffHost := range opts.DifferentHost { + if !uuidRegex.MatchString(diffHost) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.DifferentHost" + err.Value = opts.DifferentHost + err.Info = "The hosts must be in UUID format." + return nil, err + } + } + sh["different_host"] = opts.DifferentHost + } + + if len(opts.SameHost) > 0 { + for _, sameHost := range opts.SameHost { + if !uuidRegex.MatchString(sameHost) { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.SameHost" + err.Value = opts.SameHost + err.Info = "The hosts must be in UUID format." + return nil, err + } + } + sh["same_host"] = opts.SameHost + } + + /* + Query can be something simple like: + [">=", "$free_ram_mb", 1024] + + Or more complex like: + ['and', + ['>=', '$free_ram_mb', 1024], + ['>=', '$free_disk_mb', 200 * 1024] + ] + + Because of the possible complexity, just make sure the length is a minimum of 3. + */ + if len(opts.Query) > 0 { + if len(opts.Query) < 3 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.Query" + err.Value = opts.Query + err.Info = "Must be a conditional statement in the format of [op,variable,value]" + return nil, err + } + sh["query"] = opts.Query + } + + if opts.TargetCell != "" { + sh["target_cell"] = opts.TargetCell + } + + if opts.BuildNearHostIP != "" { + if _, _, err := net.ParseCIDR(opts.BuildNearHostIP); err != nil { + err := gophercloud.ErrInvalidInput{} + err.Argument = "schedulerhints.SchedulerHints.BuildNearHostIP" + err.Value = opts.BuildNearHostIP + err.Info = "Must be a valid subnet in the form 192.168.1.1/24" + return nil, err + } + ipParts := strings.Split(opts.BuildNearHostIP, "/") + sh["build_near_host_ip"] = ipParts[0] + sh["cidr"] = "/" + ipParts[1] + } + + if opts.AdditionalProperties != nil { + for k, v := range opts.AdditionalProperties { + sh[k] = v + } + } + + return sh, nil +} + +// CreateOptsExt adds a SchedulerHints option to the base CreateOpts. +type CreateOptsExt struct { + servers.CreateOptsBuilder + + // SchedulerHints provides a set of hints to the scheduler. + SchedulerHints CreateOptsBuilder +} + +// ToServerCreateMap adds the SchedulerHints option to the base server creation options. 
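+//
+// When hints are present, the resulting request body is the base server map
+// with an additional "os:scheduler_hints" member, roughly (values illustrative):
+//
+//    {"server": {...}, "os:scheduler_hints": {"group": "<group-uuid>"}}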
+func (opts CreateOptsExt) ToServerCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToServerCreateMap() + if err != nil { + return nil, err + } + + schedulerHints, err := opts.SchedulerHints.ToServerSchedulerHintsCreateMap() + if err != nil { + return nil, err + } + + if len(schedulerHints) == 0 { + return base, nil + } + + base["os:scheduler_hints"] = schedulerHints + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/testing/doc.go new file mode 100644 index 000000000000..1915aef2fed5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/testing/doc.go @@ -0,0 +1,2 @@ +// schedulerhints unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/testing/requests_test.go new file mode 100644 index 000000000000..c387eab86050 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints/testing/requests_test.go @@ -0,0 +1,131 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/schedulerhints" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreateOpts(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + schedulerHints := schedulerhints.SchedulerHints{ + Group: "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + DifferentHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + SameHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + Query: []interface{}{">=", "$free_ram_mb", "1024"}, + TargetCell: "foobar", + BuildNearHostIP: "192.168.1.1/24", + AdditionalProperties: map[string]interface{}{"reservation": "a0cf03a5-d921-4877-bb5c-86d26cf818e1"}, + } + + ext := schedulerhints.CreateOptsExt{ + CreateOptsBuilder: base, + SchedulerHints: schedulerHints, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1" + }, + "os:scheduler_hints": { + "group": "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + "different_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "query": [ + ">=", "$free_ram_mb", "1024" + ], + "target_cell": "foobar", + "build_near_host_ip": "192.168.1.1", + "cidr": "/24", + "reservation": "a0cf03a5-d921-4877-bb5c-86d26cf818e1" + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} + +func TestCreateOptsWithComplexQuery(t *testing.T) { + base := servers.CreateOpts{ + Name: "createdserver", + ImageRef: "asdfasdfasdf", + FlavorRef: "performance1-1", + } + + schedulerHints := schedulerhints.SchedulerHints{ + Group: "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + DifferentHost: []string{ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + SameHost: []string{ 
+ "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + }, + Query: []interface{}{"and", []string{">=", "$free_ram_mb", "1024"}, []string{">=", "$free_disk_mb", "204800"}}, + TargetCell: "foobar", + BuildNearHostIP: "192.168.1.1/24", + AdditionalProperties: map[string]interface{}{"reservation": "a0cf03a5-d921-4877-bb5c-86d26cf818e1"}, + } + + ext := schedulerhints.CreateOptsExt{ + CreateOptsBuilder: base, + SchedulerHints: schedulerHints, + } + + expected := ` + { + "server": { + "name": "createdserver", + "imageRef": "asdfasdfasdf", + "flavorRef": "performance1-1" + }, + "os:scheduler_hints": { + "group": "101aed42-22d9-4a3e-9ba1-21103b0d1aba", + "different_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287" + ], + "query": [ + "and", + [">=", "$free_ram_mb", "1024"], + [">=", "$free_disk_mb", "204800"] + ], + "target_cell": "foobar", + "build_near_host_ip": "192.168.1.1", + "cidr": "/24", + "reservation": "a0cf03a5-d921-4877-bb5c-86d26cf818e1" + } + } + ` + actual, err := ext.ToServerCreateMap() + th.AssertNoErr(t, err) + th.CheckJSONEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go new file mode 100644 index 000000000000..8d3ebf2e5d5c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/doc.go @@ -0,0 +1,112 @@ +/* +Package secgroups provides the ability to manage security groups through the +Nova API. + +This API has been deprecated and will be removed from a future release of the +Nova API service. + +For environments that support this extension, this package can be used +regardless of if either Neutron or nova-network is used as the cloud's network +service. 
+
+Example to List Security Groups
+
+    allPages, err := secgroups.List(computeClient).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, sg := range allSecurityGroups {
+        fmt.Printf("%+v\n", sg)
+    }
+
+Example to List Security Groups by Server
+
+    serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36"
+
+    allPages, err := secgroups.ListByServer(computeClient, serverID).AllPages()
+    if err != nil {
+        panic(err)
+    }
+
+    allSecurityGroups, err := secgroups.ExtractSecurityGroups(allPages)
+    if err != nil {
+        panic(err)
+    }
+
+    for _, sg := range allSecurityGroups {
+        fmt.Printf("%+v\n", sg)
+    }
+
+Example to Create a Security Group
+
+    createOpts := secgroups.CreateOpts{
+        Name:        "group_name",
+        Description: "A Security Group",
+    }
+
+    sg, err := secgroups.Create(computeClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Create a Security Group Rule
+
+    sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+    createOpts := secgroups.CreateRuleOpts{
+        ParentGroupID: sgID,
+        FromPort:      22,
+        ToPort:        22,
+        IPProtocol:    "tcp",
+        CIDR:          "0.0.0.0/0",
+    }
+
+    rule, err := secgroups.CreateRule(computeClient, createOpts).Extract()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Add a Security Group to a Server
+
+    serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36"
+    sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+    err := secgroups.AddServer(computeClient, serverID, sgID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Remove a Security Group from a Server
+
+    serverID := "aab3ad01-9956-4623-a29b-24afc89a7d36"
+    sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+
+    err := secgroups.RemoveServer(computeClient, serverID, sgID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete a Security Group
+
+    sgID := "37d94f8a-d136-465c-ae46-144f0d8ef141"
+    err := secgroups.Delete(computeClient, sgID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+
+Example to Delete a Security Group Rule
+
+    ruleID := "6221fe3e-383d-46c9-a3a6-845e66c1e8b4"
+    err := secgroups.DeleteRule(computeClient, ruleID).ExtractErr()
+    if err != nil {
+        panic(err)
+    }
+*/
+package secgroups
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go
new file mode 100644
index 000000000000..8b93f08b07e4
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/requests.go
@@ -0,0 +1,183 @@
+package secgroups
+
+import (
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/pagination"
+)
+
+func commonList(client *gophercloud.ServiceClient, url string) pagination.Pager {
+    return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+        return SecurityGroupPage{pagination.SinglePageBase(r)}
+    })
+}
+
+// List will return a collection of all the security groups for a particular
+// tenant.
+func List(client *gophercloud.ServiceClient) pagination.Pager {
+    return commonList(client, rootURL(client))
+}
+
+// ListByServer will return a collection of all the security groups which are
+// associated with a particular server.
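+//
+// For example (client construction assumed):
+//
+//    allPages, err := secgroups.ListByServer(computeClient, "server-id").AllPages()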
+func ListByServer(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return commonList(client, listByServerURL(client, serverID)) +} + +// GroupOpts is the underlying struct responsible for creating or updating +// security groups. It therefore represents the mutable attributes of a +// security group. +type GroupOpts struct { + // the name of your security group. + Name string `json:"name" required:"true"` + // the description of your security group. + Description string `json:"description" required:"true"` +} + +// CreateOpts is the struct responsible for creating a security group. +type CreateOpts GroupOpts + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +// ToSecGroupCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Create will create a new security group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// UpdateOpts is the struct responsible for updating an existing security group. +type UpdateOpts GroupOpts + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// ToSecGroupUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Update will modify the mutable properties of a security group, notably its +// name and description. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecGroupUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(resourceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get will return details for a particular security group. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, id), &r.Body, nil) + return +} + +// Delete will permanently delete a security group from the project. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, id), nil) + return +} + +// CreateRuleOpts represents the configuration for adding a new rule to an +// existing security group. +type CreateRuleOpts struct { + // ID is the ID of the group that this rule will be added to. + ParentGroupID string `json:"parent_group_id" required:"true"` + + // FromPort is the lower bound of the port range that will be opened. + // Use -1 to allow all ICMP traffic. + FromPort int `json:"from_port"` + + // ToPort is the upper bound of the port range that will be opened. + // Use -1 to allow all ICMP traffic. + ToPort int `json:"to_port"` + + // IPProtocol the protocol type that will be allowed, e.g. TCP. + IPProtocol string `json:"ip_protocol" required:"true"` + + // CIDR is the network CIDR to allow traffic from. + // This is ONLY required if FromGroupID is blank. 
This represents the IP + // range that will be the source of network traffic to your security group. + // Use 0.0.0.0/0 to allow all IP addresses. + CIDR string `json:"cidr,omitempty" or:"FromGroupID"` + + // FromGroupID represents another security group to allow access. + // This is ONLY required if CIDR is blank. This value represents the ID of a + // group that forwards traffic to the parent group. So, instead of accepting + // network traffic from an entire IP range, you can instead refine the + // inbound source by an existing security group. + FromGroupID string `json:"group_id,omitempty" or:"CIDR"` +} + +// CreateRuleOptsBuilder allows extensions to add additional parameters to the +// CreateRule request. +type CreateRuleOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// ToRuleCreateMap builds a request body from CreateRuleOpts. +func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group_rule") +} + +// CreateRule will add a new rule to an existing security group (whose ID is +// specified in CreateRuleOpts). You have the option of controlling inbound +// traffic from either an IP range (CIDR) or from another security group. +func CreateRule(client *gophercloud.ServiceClient, opts CreateRuleOptsBuilder) (r CreateRuleResult) { + b, err := opts.ToRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootRuleURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// DeleteRule will permanently delete a rule from a security group. +func DeleteRule(client *gophercloud.ServiceClient, id string) (r DeleteRuleResult) { + _, r.Err = client.Delete(resourceRuleURL(client, id), nil) + return +} + +func actionMap(prefix, groupName string) map[string]map[string]string { + return map[string]map[string]string{ + prefix + "SecurityGroup": map[string]string{"name": groupName}, + } +} + +// AddServer will associate a server and a security group, enforcing the +// rules of the group on the server. +func AddServer(client *gophercloud.ServiceClient, serverID, groupName string) (r AddServerResult) { + _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("add", groupName), nil, nil) + return +} + +// RemoveServer will disassociate a server from a security group. +func RemoveServer(client *gophercloud.ServiceClient, serverID, groupName string) (r RemoveServerResult) { + _, r.Err = client.Post(serverActionURL(client, serverID), actionMap("remove", groupName), nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go new file mode 100644 index 000000000000..046889220654 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/results.go @@ -0,0 +1,214 @@ +package secgroups + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecurityGroup represents a security group. +type SecurityGroup struct { + // The unique ID of the group. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string `json:"-"` + + // The human-readable name of the group, which needs to be unique. 
+ Name string `json:"name"` + + // The human-readable description of the group. + Description string `json:"description"` + + // The rules which determine how this security group operates. + Rules []Rule `json:"rules"` + + // The ID of the tenant to which this security group belongs. + TenantID string `json:"tenant_id"` +} + +func (r *SecurityGroup) UnmarshalJSON(b []byte) error { + type tmp SecurityGroup + var s struct { + tmp + ID interface{} `json:"id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = SecurityGroup(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + return err +} + +// Rule represents a security group rule, a policy which determines how a +// security group operates and what inbound traffic it allows in. +type Rule struct { + // The unique ID. If Neutron is installed, this ID will be + // represented as a string UUID; if Neutron is not installed, it will be a + // numeric ID. For the sake of consistency, we always cast it to a string. + ID string `json:"-"` + + // The lower bound of the port range which this security group should open up. + FromPort int `json:"from_port"` + + // The upper bound of the port range which this security group should open up. + ToPort int `json:"to_port"` + + // The IP protocol (e.g. TCP) which the security group accepts. + IPProtocol string `json:"ip_protocol"` + + // The CIDR IP range whose traffic can be received. + IPRange IPRange `json:"ip_range"` + + // The security group ID to which this rule belongs. + ParentGroupID string `json:"-"` + + // Not documented. + Group Group +} + +func (r *Rule) UnmarshalJSON(b []byte) error { + type tmp Rule + var s struct { + tmp + ID interface{} `json:"id"` + ParentGroupID interface{} `json:"parent_group_id"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Rule(s.tmp) + + switch t := s.ID.(type) { + case float64: + r.ID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ID = t + } + + switch t := s.ParentGroupID.(type) { + case float64: + r.ParentGroupID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.ParentGroupID = t + } + + return err +} + +// IPRange represents the IP range whose traffic will be accepted by the +// security group. +type IPRange struct { + CIDR string +} + +// Group represents a group. +type Group struct { + TenantID string `json:"tenant_id"` + Name string +} + +// SecurityGroupPage is a single page of a SecurityGroup collection. +type SecurityGroupPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Security Groups contains any +// results. +func (page SecurityGroupPage) IsEmpty() (bool, error) { + users, err := ExtractSecurityGroups(page) + return len(users) == 0, err +} + +// ExtractSecurityGroups returns a slice of SecurityGroups contained in a +// single page of results. +func ExtractSecurityGroups(r pagination.Page) ([]SecurityGroup, error) { + var s struct { + SecurityGroups []SecurityGroup `json:"security_groups"` + } + err := (r.(SecurityGroupPage)).ExtractInto(&s) + return s.SecurityGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret the result as a SecurityGroup. 
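// As a hedged usage sketch (the client and group ID below are illustrative
// assumptions, since the package-level examples do not cover Get):
//
//	sg, err := secgroups.Get(computeClient, "37d94f8a-d136-465c-ae46-144f0d8ef141").Extract()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("%+v\n", sg)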
+type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret the result as a SecurityGroup. +type UpdateResult struct { + commonResult +} + +// Extract will extract a SecurityGroup struct from most responses. +func (r commonResult) Extract() (*SecurityGroup, error) { + var s struct { + SecurityGroup *SecurityGroup `json:"security_group"` + } + err := r.ExtractInto(&s) + return s.SecurityGroup, err +} + +// CreateRuleResult represents the result when adding rules to a security group. +// Call its Extract method to interpret the result as a Rule. +type CreateRuleResult struct { + gophercloud.Result +} + +// Extract will extract a Rule struct from a CreateRuleResult. +func (r CreateRuleResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"security_group_rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// DeleteResult is the response from delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// DeleteRuleResult is the response from a DeleteRule operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteRuleResult struct { + gophercloud.ErrResult +} + +// AddServerResult is the response from an AddServer operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type AddServerResult struct { + gophercloud.ErrResult +} + +// RemoveServerResult is the response from a RemoveServer operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type RemoveServerResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/doc.go new file mode 100644 index 000000000000..c5e60ea0940e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/doc.go @@ -0,0 +1,2 @@ +// secgroups unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/fixtures.go new file mode 100644 index 000000000000..27bd56f3646e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/fixtures.go @@ -0,0 +1,326 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const rootPath = "/os-security-groups" + +const listGroupsJSON = ` +{ + "security_groups": [ + { + "description": "default", + "id": "{groupID}", + "name": "default", + "rules": [], + "tenant_id": "openstack" + } + ] +} +` + +func mockListGroupsResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, listGroupsJSON) + }) +} + +func mockListGroupsByServerResponse(t *testing.T, serverID string) { + url := fmt.Sprintf("/servers/%s%s", serverID, rootPath) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { 
+ th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, listGroupsJSON) + }) +} + +func mockCreateGroupResponse(t *testing.T) { + th.Mux.HandleFunc(rootPath, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group": { + "name": "test", + "description": "something" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "something", + "id": "{groupID}", + "name": "test", + "rules": [], + "tenant_id": "openstack" + } +} +`) + }) +} + +func mockUpdateGroupResponse(t *testing.T, groupID string) { + url := fmt.Sprintf("%s/%s", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group": { + "name": "new_name", + "description": "new_desc" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "something", + "id": "{groupID}", + "name": "new_name", + "rules": [], + "tenant_id": "openstack" + } +} +`) + }) +} + +func mockGetGroupsResponse(t *testing.T, groupID string) { + url := fmt.Sprintf("%s/%s", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "description": "default", + "id": "{groupID}", + "name": "default", + "rules": [ + { + "from_port": 80, + "group": { + "tenant_id": "openstack", + "name": "default" + }, + "ip_protocol": "TCP", + "to_port": 85, + "parent_group_id": "{groupID}", + "ip_range": { + "cidr": "0.0.0.0" + }, + "id": "{ruleID}" + } + ], + "tenant_id": "openstack" + } +} + `) + }) +} + +func mockGetNumericIDGroupResponse(t *testing.T, groupID int) { + url := fmt.Sprintf("%s/%d", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "id": %d + } +} + `, groupID) + }) +} + +func mockGetNumericIDGroupRuleResponse(t *testing.T, groupID int) { + url := fmt.Sprintf("%s/%d", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group": { + "id": %d, + "rules": [ + { + "parent_group_id": %d, + "id": %d + } + ] + } +} + `, groupID, groupID, groupID) + }) +} + +func mockDeleteGroupResponse(t *testing.T, groupID string) { + url := fmt.Sprintf("%s/%s", rootPath, groupID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +func 
mockAddRuleResponse(t *testing.T) { + th.Mux.HandleFunc("/os-security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group_rule": { + "from_port": 22, + "ip_protocol": "TCP", + "to_port": 22, + "parent_group_id": "{groupID}", + "cidr": "0.0.0.0/0" + } +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "from_port": 22, + "group": {}, + "ip_protocol": "TCP", + "to_port": 22, + "parent_group_id": "{groupID}", + "ip_range": { + "cidr": "0.0.0.0/0" + }, + "id": "{ruleID}" + } +}`) + }) +} + +func mockAddRuleResponseICMPZero(t *testing.T) { + th.Mux.HandleFunc("/os-security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "security_group_rule": { + "from_port": 0, + "ip_protocol": "ICMP", + "to_port": 0, + "parent_group_id": "{groupID}", + "cidr": "0.0.0.0/0" + } +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "from_port": 0, + "group": {}, + "ip_protocol": "ICMP", + "to_port": 0, + "parent_group_id": "{groupID}", + "ip_range": { + "cidr": "0.0.0.0/0" + }, + "id": "{ruleID}" + } +}`) + }) +} + +func mockDeleteRuleResponse(t *testing.T, ruleID string) { + url := fmt.Sprintf("/os-security-group-rules/%s", ruleID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockAddServerToGroupResponse(t *testing.T, serverID string) { + url := fmt.Sprintf("/servers/%s/action", serverID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "addSecurityGroup": { + "name": "test" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockRemoveServerFromGroupResponse(t *testing.T, serverID string) { + url := fmt.Sprintf("/servers/%s/action", serverID) + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "removeSecurityGroup": { + "name": "test" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/requests_test.go new file mode 100644 index 000000000000..b5207646be3e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/testing/requests_test.go @@ -0,0 +1,302 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ( + serverID = "{serverID}" + groupID = 
"{groupID}" + ruleID = "{ruleID}" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListGroupsResponse(t) + + count := 0 + + err := secgroups.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := secgroups.ExtractSecurityGroups(page) + if err != nil { + t.Errorf("Failed to extract users: %v", err) + return false, err + } + + expected := []secgroups.SecurityGroup{ + { + ID: groupID, + Description: "default", + Name: "default", + Rules: []secgroups.Rule{}, + TenantID: "openstack", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestListByServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListGroupsByServerResponse(t, serverID) + + count := 0 + + err := secgroups.ListByServer(client.ServiceClient(), serverID).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := secgroups.ExtractSecurityGroups(page) + if err != nil { + t.Errorf("Failed to extract users: %v", err) + return false, err + } + + expected := []secgroups.SecurityGroup{ + { + ID: groupID, + Description: "default", + Name: "default", + Rules: []secgroups.Rule{}, + TenantID: "openstack", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateGroupResponse(t) + + opts := secgroups.CreateOpts{ + Name: "test", + Description: "something", + } + + group, err := secgroups.Create(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.SecurityGroup{ + ID: groupID, + Name: "test", + Description: "something", + TenantID: "openstack", + Rules: []secgroups.Rule{}, + } + th.AssertDeepEquals(t, expected, group) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateGroupResponse(t, groupID) + + opts := secgroups.UpdateOpts{ + Name: "new_name", + Description: "new_desc", + } + + group, err := secgroups.Update(client.ServiceClient(), groupID, opts).Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.SecurityGroup{ + ID: groupID, + Name: "new_name", + Description: "something", + TenantID: "openstack", + Rules: []secgroups.Rule{}, + } + th.AssertDeepEquals(t, expected, group) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetGroupsResponse(t, groupID) + + group, err := secgroups.Get(client.ServiceClient(), groupID).Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.SecurityGroup{ + ID: groupID, + Description: "default", + Name: "default", + TenantID: "openstack", + Rules: []secgroups.Rule{ + { + FromPort: 80, + ToPort: 85, + IPProtocol: "TCP", + IPRange: secgroups.IPRange{CIDR: "0.0.0.0"}, + Group: secgroups.Group{TenantID: "openstack", Name: "default"}, + ParentGroupID: groupID, + ID: ruleID, + }, + }, + } + + th.AssertDeepEquals(t, expected, group) +} + +func TestGetNumericID(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + numericGroupID := 12345 + + mockGetNumericIDGroupResponse(t, numericGroupID) + + group, err := secgroups.Get(client.ServiceClient(), "12345").Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.SecurityGroup{ID: "12345"} + th.AssertDeepEquals(t, expected, group) +} + +func TestGetNumericRuleID(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
numericGroupID := 12345 + + mockGetNumericIDGroupRuleResponse(t, numericGroupID) + + group, err := secgroups.Get(client.ServiceClient(), "12345").Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.SecurityGroup{ + ID: "12345", + Rules: []secgroups.Rule{ + { + ParentGroupID: "12345", + ID: "12345", + }, + }, + } + th.AssertDeepEquals(t, expected, group) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteGroupResponse(t, groupID) + + err := secgroups.Delete(client.ServiceClient(), groupID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAddRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockAddRuleResponse(t) + + opts := secgroups.CreateRuleOpts{ + ParentGroupID: groupID, + FromPort: 22, + ToPort: 22, + IPProtocol: "TCP", + CIDR: "0.0.0.0/0", + } + + rule, err := secgroups.CreateRule(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.Rule{ + FromPort: 22, + ToPort: 22, + Group: secgroups.Group{}, + IPProtocol: "TCP", + ParentGroupID: groupID, + IPRange: secgroups.IPRange{CIDR: "0.0.0.0/0"}, + ID: ruleID, + } + + th.AssertDeepEquals(t, expected, rule) +} + +func TestAddRuleICMPZero(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockAddRuleResponseICMPZero(t) + + opts := secgroups.CreateRuleOpts{ + ParentGroupID: groupID, + FromPort: 0, + ToPort: 0, + IPProtocol: "ICMP", + CIDR: "0.0.0.0/0", + } + + rule, err := secgroups.CreateRule(client.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := &secgroups.Rule{ + FromPort: 0, + ToPort: 0, + Group: secgroups.Group{}, + IPProtocol: "ICMP", + ParentGroupID: groupID, + IPRange: secgroups.IPRange{CIDR: "0.0.0.0/0"}, + ID: ruleID, + } + + th.AssertDeepEquals(t, expected, rule) +} + +func TestDeleteRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteRuleResponse(t, ruleID) + + err := secgroups.DeleteRule(client.ServiceClient(), ruleID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestAddServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockAddServerToGroupResponse(t, serverID) + + err := secgroups.AddServer(client.ServiceClient(), serverID, "test").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestRemoveServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockRemoveServerFromGroupResponse(t, serverID) + + err := secgroups.RemoveServer(client.ServiceClient(), serverID, "test").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go new file mode 100644 index 000000000000..d99746cae928 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/secgroups/urls.go @@ -0,0 +1,32 @@ +package secgroups + +import "github.com/gophercloud/gophercloud" + +const ( + secgrouppath = "os-security-groups" + rulepath = "os-security-group-rules" +) + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(secgrouppath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(secgrouppath) +} + +func listByServerURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers", serverID, secgrouppath) +} + +func rootRuleURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rulepath) +} + +func resourceRuleURL(c *gophercloud.ServiceClient, 
id string) string { + return c.ServiceURL(rulepath, id) +} + +func serverActionURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("servers", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go new file mode 100644 index 000000000000..814bde37f3c5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/doc.go @@ -0,0 +1,40 @@ +/* +Package servergroups provides the ability to manage server groups. + +Example to List Server Groups + + allpages, err := servergroups.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allServerGroups, err := servergroups.ExtractServerGroups(allPages) + if err != nil { + panic(err) + } + + for _, sg := range allServerGroups { + fmt.Printf("%#v\n", sg) + } + +Example to Create a Server Group + + createOpts := servergroups.CreateOpts{ + Name: "my_sg", + Policies: []string{"anti-affinity"}, + } + + sg, err := servergroups.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Server Group + + sgID := "7a6f29ad-e34d-4368-951a-58a08f11cfb7" + err := servergroups.Delete(computeClient, sgID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package servergroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go new file mode 100644 index 000000000000..1439a5a34ce1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/requests.go @@ -0,0 +1,59 @@ +package servergroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of +// ServerGroups. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return ServerGroupPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToServerGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies Server Group creation parameters. +type CreateOpts struct { + // Name is the name of the server group + Name string `json:"name" required:"true"` + + // Policies are the server group policies + Policies []string `json:"policies" required:"true"` +} + +// ToServerGroupCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToServerGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "server_group") +} + +// Create requests the creation of a new Server Group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToServerGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns data about a previously created ServerGroup. 
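// As a hedged usage sketch (the client and ID are illustrative assumptions;
// the package-level examples cover List, Create, and Delete but not Get):
//
//	sg, err := servergroups.Get(computeClient, "616fb98f-46ca-475e-917e-2563e5a8cd19").Extract()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("%#v\n", sg)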
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete requests the deletion of a previously allocated ServerGroup. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go new file mode 100644 index 000000000000..b9aeef981541 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/results.go @@ -0,0 +1,87 @@ +package servergroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A ServerGroup creates a policy for instance placement in the cloud. +type ServerGroup struct { + // ID is the unique ID of the Server Group. + ID string `json:"id"` + + // Name is the common name of the server group. + Name string `json:"name"` + + // Polices are the group policies. + // + // Normally a single policy is applied: + // + // "affinity" will place all servers within the server group on the + // same compute node. + // + // "anti-affinity" will place servers within the server group on different + // compute nodes. + Policies []string `json:"policies"` + + // Members are the members of the server group. + Members []string `json:"members"` + + // Metadata includes a list of all user-specified key-value pairs attached + // to the Server Group. + Metadata map[string]interface{} +} + +// ServerGroupPage stores a single page of all ServerGroups results from a +// List call. +type ServerGroupPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a ServerGroupsPage is empty. +func (page ServerGroupPage) IsEmpty() (bool, error) { + va, err := ExtractServerGroups(page) + return len(va) == 0, err +} + +// ExtractServerGroups interprets a page of results as a slice of +// ServerGroups. +func ExtractServerGroups(r pagination.Page) ([]ServerGroup, error) { + var s struct { + ServerGroups []ServerGroup `json:"server_groups"` + } + err := (r.(ServerGroupPage)).ExtractInto(&s) + return s.ServerGroups, err +} + +type ServerGroupResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Server Group resource +// response as a ServerGroup struct. +func (r ServerGroupResult) Extract() (*ServerGroup, error) { + var s struct { + ServerGroup *ServerGroup `json:"server_group"` + } + err := r.ExtractInto(&s) + return s.ServerGroup, err +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a ServerGroup. +type CreateResult struct { + ServerGroupResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a ServerGroup. +type GetResult struct { + ServerGroupResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/doc.go new file mode 100644 index 000000000000..644bb49df1ff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/doc.go @@ -0,0 +1,2 @@ +// servergroups unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/fixtures.go new file mode 100644 index 000000000000..b53757a40f38 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/fixtures.go @@ -0,0 +1,160 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "server_groups": [ + { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + }, + { + "id": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "name": "test2", + "policies": [ + "affinity" + ], + "members": [], + "metadata": {} + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "server_group": { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + } +} +` + +// CreateOutput is a sample response to a Post call +const CreateOutput = ` +{ + "server_group": { + "id": "616fb98f-46ca-475e-917e-2563e5a8cd19", + "name": "test", + "policies": [ + "anti-affinity" + ], + "members": [], + "metadata": {} + } +} +` + +// FirstServerGroup is the first result in ListOutput. +var FirstServerGroup = servergroups.ServerGroup{ + ID: "616fb98f-46ca-475e-917e-2563e5a8cd19", + Name: "test", + Policies: []string{ + "anti-affinity", + }, + Members: []string{}, + Metadata: map[string]interface{}{}, +} + +// SecondServerGroup is the second result in ListOutput. +var SecondServerGroup = servergroups.ServerGroup{ + ID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + Name: "test2", + Policies: []string{ + "affinity", + }, + Members: []string{}, + Metadata: map[string]interface{}{}, +} + +// ExpectedServerGroupSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedServerGroupSlice = []servergroups.ServerGroup{FirstServerGroup, SecondServerGroup} + +// CreatedServerGroup is the parsed result from CreateOutput. +var CreatedServerGroup = servergroups.ServerGroup{ + ID: "616fb98f-46ca-475e-917e-2563e5a8cd19", + Name: "test", + Policies: []string{ + "anti-affinity", + }, + Members: []string{}, + Metadata: map[string]interface{}{}, +} + +// HandleListSuccessfully configures the test server to respond to a List request. 
+func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing server group +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups/4d8c3732-a248-40ed-bebc-539a6ffd25c0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request +// for a new server group +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "server_group": { + "name": "test", + "policies": [ + "anti-affinity" + ] + } +} +`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateOutput) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request for a +// an existing server group +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-server-groups/616fb98f-46ca-475e-917e-2563e5a8cd19", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/requests_test.go new file mode 100644 index 000000000000..d86fa568610c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/testing/requests_test.go @@ -0,0 +1,60 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := servergroups.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := servergroups.ExtractServerGroups(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedServerGroupSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + actual, err := servergroups.Create(client.ServiceClient(), servergroups.CreateOpts{ + Name: "test", + Policies: []string{"anti-affinity"}, + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedServerGroup, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := servergroups.Get(client.ServiceClient(), "4d8c3732-a248-40ed-bebc-539a6ffd25c0").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, 
&FirstServerGroup, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := servergroups.Delete(client.ServiceClient(), "616fb98f-46ca-475e-917e-2563e5a8cd19").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go new file mode 100644 index 000000000000..9a1f99b19973 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/servergroups/urls.go @@ -0,0 +1,25 @@ +package servergroups + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-server-groups" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return getURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/doc.go new file mode 100644 index 000000000000..0f3127f04254 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/doc.go @@ -0,0 +1,20 @@ +/* +Package serverusage provides the ability the ability to extend a server result +with the extended usage information. + +Example to Get an extended information: + + type serverUsageExt struct { + servers.Server + serverusage.UsageExt + } + var serverWithUsageExt serverUsageExt + + err := servers.Get(computeClient, "d650a0ce-17c3-497d-961a-43c4af80998a").ExtractInto(&serverWithUsageExt) + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", serverWithUsageExt) +*/ +package serverusage diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/results.go new file mode 100644 index 000000000000..a80abb6bc95b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/results.go @@ -0,0 +1,34 @@ +package serverusage + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" +) + +// UsageExt represents OS-SRV-USG server response fields. +type UsageExt struct { + LaunchedAt time.Time `json:"-"` + TerminatedAt time.Time `json:"-"` +} + +// UnmarshalJSON helps to unmarshal UsageExt fields into needed values. 
+func (r *UsageExt) UnmarshalJSON(b []byte) error { + type tmp UsageExt + var s struct { + tmp + LaunchedAt gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:launched_at"` + TerminatedAt gophercloud.JSONRFC3339MilliNoZ `json:"OS-SRV-USG:terminated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = UsageExt(s.tmp) + + r.LaunchedAt = time.Time(s.LaunchedAt) + r.TerminatedAt = time.Time(s.TerminatedAt) + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/fixtures.go new file mode 100644 index 000000000000..2d2cf2ef538b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/fixtures.go @@ -0,0 +1,20 @@ +package testing + +// ServerWithUsageExtResult represents a raw server response from the Compute API +// with OS-SRV-USG data. +// Most of the actual fields were deleted from the response. +const ServerWithUsageExtResult = ` +{ + "server": { + "OS-SRV-USG:launched_at": "2018-07-27T09:15:55.000000", + "OS-SRV-USG:terminated_at": null, + "created": "2018-07-27T09:15:48Z", + "updated": "2018-07-27T09:15:55Z", + "id": "d650a0ce-17c3-497d-961a-43c4af80998a", + "name": "test_instance", + "status": "ACTIVE", + "user_id": "0f2f3822679e4b3ea073e5d1c6ed5f02", + "tenant_id": "424e7cf0243c468ca61732ba45973b3e" + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/requests_test.go new file mode 100644 index 000000000000..b05dee5786db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage/testing/requests_test.go @@ -0,0 +1,44 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/serverusage" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestServerWithUsageExt(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/d650a0ce-17c3-497d-961a-43c4af80998a", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, ServerWithUsageExtResult) + }) + + type serverUsageExt struct { + servers.Server + serverusage.UsageExt + } + var serverWithUsageExt serverUsageExt + err := servers.Get(fake.ServiceClient(), "d650a0ce-17c3-497d-961a-43c4af80998a").ExtractInto(&serverWithUsageExt) + th.AssertNoErr(t, err) + + th.AssertEquals(t, serverWithUsageExt.LaunchedAt, time.Date(2018, 07, 27, 9, 15, 55, 0, time.UTC)) + th.AssertEquals(t, serverWithUsageExt.TerminatedAt, time.Time{}) + th.AssertEquals(t, 
serverWithUsageExt.Created, time.Date(2018, 07, 27, 9, 15, 48, 0, time.UTC)) + th.AssertEquals(t, serverWithUsageExt.Updated, time.Date(2018, 07, 27, 9, 15, 55, 0, time.UTC)) + th.AssertEquals(t, serverWithUsageExt.ID, "d650a0ce-17c3-497d-961a-43c4af80998a") + th.AssertEquals(t, serverWithUsageExt.Name, "test_instance") + th.AssertEquals(t, serverWithUsageExt.Status, "ACTIVE") + th.AssertEquals(t, serverWithUsageExt.UserID, "0f2f3822679e4b3ea073e5d1c6ed5f02") + th.AssertEquals(t, serverWithUsageExt.TenantID, "424e7cf0243c468ca61732ba45973b3e") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/doc.go new file mode 100644 index 000000000000..2d38c42a9523 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/doc.go @@ -0,0 +1,22 @@ +/* +Package services returns information about the compute services in the OpenStack +cloud. + +Example of Retrieving list of all services + + allPages, err := services.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allServices, err := services.ExtractServices(allPages) + if err != nil { + panic(err) + } + + for _, service := range allServices { + fmt.Printf("%+v\n", service) + } +*/ + +package services diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/requests.go new file mode 100644 index 000000000000..d2e31f82d340 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/requests.go @@ -0,0 +1,13 @@ +package services + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List makes a request against the API to list services. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return ServicePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/results.go new file mode 100644 index 000000000000..1ffc99cf9d23 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/results.go @@ -0,0 +1,73 @@ +package services + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Service represents a Compute service in the OpenStack cloud. +type Service struct { + // The binary name of the service. + Binary string `json:"binary"` + + // The reason for disabling a service. + DisabledReason string `json:"disabled_reason"` + + // The name of the host. + Host string `json:"host"` + + // The id of the service. + ID int `json:"id"` + + // The state of the service. One of up or down. + State string `json:"state"` + + // The status of the service. One of enabled or disabled. + Status string `json:"status"` + + // The date and time when the resource was updated. + UpdatedAt time.Time `json:"-"` + + // The availability zone name. 
+ Zone string `json:"zone"` +} + +// UnmarshalJSON to override default +func (r *Service) UnmarshalJSON(b []byte) error { + type tmp Service + var s struct { + tmp + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Service(s.tmp) + + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +// ServicePage represents a single page of all Services from a List request. +type ServicePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Services contains any results. +func (page ServicePage) IsEmpty() (bool, error) { + services, err := ExtractServices(page) + return len(services) == 0, err +} + +func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Service []Service `json:"services"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Service, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/testing/fixtures.go new file mode 100644 index 000000000000..79e704a7a734 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/testing/fixtures.go @@ -0,0 +1,123 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ServiceListBody is sample response to the List call +const ServiceListBody = ` +{ + "services": [ + { + "id": 1, + "binary": "nova-scheduler", + "disabled_reason": "test1", + "host": "host1", + "state": "up", + "status": "disabled", + "updated_at": "2012-10-29T13:42:02.000000", + "forced_down": false, + "zone": "internal" + }, + { + "id": 2, + "binary": "nova-compute", + "disabled_reason": "test2", + "host": "host1", + "state": "up", + "status": "disabled", + "updated_at": "2012-10-29T13:42:05.000000", + "forced_down": false, + "zone": "nova" + }, + { + "id": 3, + "binary": "nova-scheduler", + "disabled_reason": null, + "host": "host2", + "state": "down", + "status": "enabled", + "updated_at": "2012-09-19T06:55:34.000000", + "forced_down": false, + "zone": "internal" + }, + { + "id": 4, + "binary": "nova-compute", + "disabled_reason": "test4", + "host": "host2", + "state": "down", + "status": "disabled", + "updated_at": "2012-09-18T08:03:38.000000", + "forced_down": false, + "zone": "nova" + } + ] +} +` + +// First service from the ServiceListBody +var FirstFakeService = services.Service{ + Binary: "nova-scheduler", + DisabledReason: "test1", + Host: "host1", + ID: 1, + State: "up", + Status: "disabled", + UpdatedAt: time.Date(2012, 10, 29, 13, 42, 2, 0, time.UTC), + Zone: "internal", +} + +// Second service from the ServiceListBody +var SecondFakeService = services.Service{ + Binary: "nova-compute", + DisabledReason: "test2", + Host: "host1", + ID: 2, + State: "up", + Status: "disabled", + UpdatedAt: time.Date(2012, 10, 29, 13, 42, 5, 0, time.UTC), + Zone: "nova", +} + +// Third service from the ServiceListBody +var ThirdFakeService = services.Service{ + Binary: "nova-scheduler", + DisabledReason: "", + Host: "host2", + ID: 3, + State: "down", + Status: "enabled", + UpdatedAt: time.Date(2012, 9, 19, 6, 55, 34, 0, time.UTC), + Zone: "internal", +} + +// Fourth service from the ServiceListBody +var 
FourthFakeService = services.Service{ + Binary: "nova-compute", + DisabledReason: "test4", + Host: "host2", + ID: 4, + State: "down", + Status: "disabled", + UpdatedAt: time.Date(2012, 9, 18, 8, 3, 38, 0, time.UTC), + Zone: "nova", +} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ServiceListBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/testing/requests_test.go new file mode 100644 index 000000000000..7f998814bed7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/testing/requests_test.go @@ -0,0 +1,42 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services" + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListServices(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + HandleListSuccessfully(t) + + pages := 0 + err := services.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := services.ExtractServices(page) + if err != nil { + return false, err + } + + if len(actual) != 4 { + t.Fatalf("Expected 4 services, got %d", len(actual)) + } + testhelper.CheckDeepEquals(t, FirstFakeService, actual[0]) + testhelper.CheckDeepEquals(t, SecondFakeService, actual[1]) + testhelper.CheckDeepEquals(t, ThirdFakeService, actual[2]) + testhelper.CheckDeepEquals(t, FourthFakeService, actual[3]) + + return true, nil + }) + + testhelper.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/urls.go new file mode 100644 index 000000000000..61d794007e91 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/services/urls.go @@ -0,0 +1,7 @@ +package services + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-services") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go new file mode 100644 index 000000000000..ab97edb77662 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/doc.go @@ -0,0 +1,19 @@ +/* +Package startstop provides functionality to start and stop servers that have +been provisioned by the OpenStack Compute service. 
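As a hedged sketch built on the Stop/Start example below, a minimal restart helper might look like the following (computeClient is assumed to be an authenticated compute client; a production helper would likely also poll the server status between the two calls):

    // restart stops a server and immediately requests that it be started again.
    func restart(computeClient *gophercloud.ServiceClient, serverID string) error {
        if err := startstop.Stop(computeClient, serverID).ExtractErr(); err != nil {
            return err
        }
        return startstop.Start(computeClient, serverID).ExtractErr()
    }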
+ +Example to Stop and Start a Server + + serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e" + + err := startstop.Stop(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + + err := startstop.Start(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package startstop diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go new file mode 100644 index 000000000000..5b4f3f39dd41 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/requests.go @@ -0,0 +1,19 @@ +package startstop + +import "github.com/gophercloud/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +// Start is the operation responsible for starting a Compute server. +func Start(client *gophercloud.ServiceClient, id string) (r StartResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-start": nil}, nil, nil) + return +} + +// Stop is the operation responsible for stopping a Compute server. +func Stop(client *gophercloud.ServiceClient, id string) (r StopResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"os-stop": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go new file mode 100644 index 000000000000..834968933285 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/results.go @@ -0,0 +1,15 @@ +package startstop + +import "github.com/gophercloud/gophercloud" + +// StartResult is the response from a Start operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StartResult struct { + gophercloud.ErrResult +} + +// StopResult is the response from Stop operation. Call its ExtractErr +// method to determine if the request succeeded or failed. 
+type StopResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/doc.go new file mode 100644 index 000000000000..b6c5b8c14083 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/doc.go @@ -0,0 +1,2 @@ +// startstop unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/fixtures.go new file mode 100644 index 000000000000..1086b0e3410e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/fixtures.go @@ -0,0 +1,27 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockStartServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"os-start": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockStopServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"os-stop": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/requests_test.go new file mode 100644 index 000000000000..be45bf5c7152 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop/testing/requests_test.go @@ -0,0 +1,31 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/startstop" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const serverID = "{serverId}" + +func TestStart(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockStartServerResponse(t, serverID) + + err := startstop.Start(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestStop(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockStopServerResponse(t, serverID) + + err := startstop.Stop(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/doc.go new file mode 100644 index 000000000000..9851000e8a2e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/doc.go @@ -0,0 +1,19 @@ +/* +Package suspendresume provides functionality to suspend and resume servers that have +been provisioned by the OpenStack Compute service. 
+ +Example to Suspend and Resume a Server + + serverID := "47b6b7b7-568d-40e4-868c-d5c41735532e" + + err := suspendresume.Suspend(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + + err := suspendresume.Resume(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package suspendresume diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/requests.go new file mode 100644 index 000000000000..0abbe7212172 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/requests.go @@ -0,0 +1,19 @@ +package suspendresume + +import "github.com/gophercloud/gophercloud" + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +// Suspend is the operation responsible for suspending a Compute server. +func Suspend(client *gophercloud.ServiceClient, id string) (r SuspendResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"suspend": nil}, nil, nil) + return +} + +// Resume is the operation responsible for resuming a Compute server. +func Resume(client *gophercloud.ServiceClient, id string) (r UnsuspendResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"resume": nil}, nil, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/results.go new file mode 100644 index 000000000000..988d83e4aa2c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/results.go @@ -0,0 +1,15 @@ +package suspendresume + +import "github.com/gophercloud/gophercloud" + +// SuspendResult is the response from a Suspend operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type SuspendResult struct { + gophercloud.ErrResult +} + +// UnsuspendResult is the response from an Unsuspend operation. Call +// its ExtractErr method to determine if the request succeeded or failed. 
+type UnsuspendResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/doc.go new file mode 100644 index 000000000000..834f25516f9b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/doc.go @@ -0,0 +1,2 @@ +// suspendresume unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/fixtures.go new file mode 100644 index 000000000000..5c6405deebd7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/fixtures.go @@ -0,0 +1,27 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func mockSuspendServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"suspend": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func mockResumeServerResponse(t *testing.T, id string) { + th.Mux.HandleFunc("/servers/"+id+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{"resume": null}`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/requests_test.go new file mode 100644 index 000000000000..7c8e4ec6b125 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume/testing/requests_test.go @@ -0,0 +1,31 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/suspendresume" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const serverID = "{serverId}" + +func TestSuspend(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockSuspendServerResponse(t, serverID) + + err := suspendresume.Suspend(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestResume(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockResumeServerResponse(t, serverID) + + err := suspendresume.Resume(client.ServiceClient(), serverID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go new file mode 100644 index 000000000000..a32e8ffd5878 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/doc.go @@ -0,0 +1,26 @@ +/* +Package tenantnetworks provides the ability for tenants to see information +about the networks they have access to. 
+ +This is a deprecated API and will be removed from the Nova API service in a +future version. + +This API works in both Neutron and nova-network based OpenStack clouds. + +Example to List Networks Available to a Tenant + + allPages, err := tenantnetworks.List(computeClient).AllPages() + if err != nil { + panic(err) + } + + allNetworks, err := tenantnetworks.ExtractNetworks(allPages) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v\n", network) + } +*/ +package tenantnetworks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go new file mode 100644 index 000000000000..00899056fdd0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/requests.go @@ -0,0 +1,19 @@ +package tenantnetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of Networks. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.SinglePageBase(r)} + }) +} + +// Get returns data about a previously created Network. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go new file mode 100644 index 000000000000..bda77d5f5027 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/results.go @@ -0,0 +1,58 @@ +package tenantnetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A Network represents a network that a server communicates on. +type Network struct { + // CIDR is the IPv4 subnet. + CIDR string `json:"cidr"` + + // ID is the UUID of the network. + ID string `json:"id"` + + // Name is the common name that the network has. + Name string `json:"label"` +} + +// NetworkPage stores a single page of all Networks results from a List call. +type NetworkPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a NetworkPage is empty. +func (page NetworkPage) IsEmpty() (bool, error) { + va, err := ExtractNetworks(page) + return len(va) == 0, err +} + +// ExtractNetworks interprets a page of results as a slice of Network. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s struct { + Networks []Network `json:"networks"` + } + err := (r.(NetworkPage)).ExtractInto(&s) + return s.Networks, err +} + +type NetworkResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any Network resource response +// as a Network struct. +func (r NetworkResult) Extract() (*Network, error) { + var s struct { + Network *Network `json:"network"` + } + err := r.ExtractInto(&s) + return s.Network, err +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a Network. 
+type GetResult struct { + NetworkResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/doc.go new file mode 100644 index 000000000000..4639153ff36f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/doc.go @@ -0,0 +1,2 @@ +// tenantnetworks unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/fixtures.go new file mode 100644 index 000000000000..ae679b46e47f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/fixtures.go @@ -0,0 +1,83 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "networks": [ + { + "cidr": "10.0.0.0/29", + "id": "20c8acc0-f747-4d71-a389-46d078ebf047", + "label": "mynet_0" + }, + { + "cidr": "10.0.0.10/29", + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "label": "mynet_1" + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "network": { + "cidr": "10.0.0.10/29", + "id": "20c8acc0-f747-4d71-a389-46d078ebf000", + "label": "mynet_1" + } +} +` + +// FirstNetwork is the first result in ListOutput. +var nilTime time.Time +var FirstNetwork = tenantnetworks.Network{ + CIDR: "10.0.0.0/29", + ID: "20c8acc0-f747-4d71-a389-46d078ebf047", + Name: "mynet_0", +} + +// SecondNetwork is the second result in ListOutput. +var SecondNetwork = tenantnetworks.Network{ + CIDR: "10.0.0.10/29", + ID: "20c8acc0-f747-4d71-a389-46d078ebf000", + Name: "mynet_1", +} + +// ExpectedNetworkSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedNetworkSlice = []tenantnetworks.Network{FirstNetwork, SecondNetwork} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-tenant-networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing network. 
+func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-tenant-networks/20c8acc0-f747-4d71-a389-46d078ebf000", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/requests_test.go new file mode 100644 index 000000000000..703c8468b733 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/testing/requests_test.go @@ -0,0 +1,38 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := tenantnetworks.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := tenantnetworks.ExtractNetworks(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedNetworkSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := tenantnetworks.Get(client.ServiceClient(), "20c8acc0-f747-4d71-a389-46d078ebf000").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &SecondNetwork, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go new file mode 100644 index 000000000000..683041ded37a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/tenantnetworks/urls.go @@ -0,0 +1,17 @@ +package tenantnetworks + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-tenant-networks" + +func resourceURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return resourceURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/delegate_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/delegate_test.go new file mode 100644 index 000000000000..822093ff477a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/delegate_test.go @@ -0,0 +1,56 @@ +package testing + +import ( + "testing" + + common "github.com/gophercloud/gophercloud/openstack/common/extensions" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListExtensionsSuccessfully(t) + + count := 0 + 
extensions.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := extensions.ExtractExtensions(page) + th.AssertNoErr(t, err) + + expected := []common.Extension{ + common.Extension{ + Updated: "2013-01-20T00:00:00-00:00", + Name: "Neutron Service Type Management", + Links: []interface{}{}, + Namespace: "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + Alias: "service-type", + Description: "API for retrieving service providers for Neutron advanced services", + }, + } + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetExtensionsSuccessfully(t) + + ext, err := extensions.Get(client.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, ext.Updated, "2013-02-03T10:00:00-00:00") + th.AssertEquals(t, ext.Name, "agent") + th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/agent/api/v2.0") + th.AssertEquals(t, ext.Alias, "agent") + th.AssertEquals(t, ext.Description, "The agent management extension.") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/doc.go new file mode 100644 index 000000000000..3c5d45926323 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/doc.go @@ -0,0 +1,2 @@ +// extensions unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/fixtures.go new file mode 100644 index 000000000000..2a3fb6909460 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/testing/fixtures.go @@ -0,0 +1,57 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func HandleListExtensionsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ` +{ + "extensions": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] +} + `) + }) +} + +func HandleGetExtensionsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/extensions/agent", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "extension": { + "updated": "2013-02-03T10:00:00-00:00", + "name": "agent", + "links": [], + "namespace": "http://docs.openstack.org/ext/agent/api/v2.0", + "alias": "agent", + "description": "The agent management extension." 
+ } +} + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/doc.go new file mode 100644 index 000000000000..16b3a284ba40 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/doc.go @@ -0,0 +1,59 @@ +/* +Package usage provides information and interaction with the +SimpleTenantUsage extension for the OpenStack Compute service. + +Due to the way the API responses are formatted, it is not recommended to +query by using the AllPages convenience method. Instead, use the EachPage +method to view each result page-by-page. + +This is because the usage calculations are done _per page_ and not as +an aggregated total of the entire usage set. + +Example to Retrieve Usage for a Single Tenant: + + start := time.Date(2017, 01, 21, 10, 4, 20, 0, time.UTC) + end := time.Date(2017, 01, 21, 10, 4, 20, 0, time.UTC) + + singleTenantOpts := usage.SingleTenantOpts{ + Start: &start, + End: &end, + } + + err := usage.SingleTenant(computeClient, tenantID, singleTenantOpts).EachPage(func(page pagination.Page) (bool, error) { + tenantUsage, err := usage.ExtractSingleTenant(page) + if err != nil { + return false, err + } + + fmt.Printf("%+v\n", tenantUsage) + + return true, nil + }) + + if err != nil { + panic(err) + } + +Example to Retrieve Usage for All Tenants: + + allTenantsOpts := usage.AllTenantsOpts{ + Detailed: true, + } + + err := usage.AllTenants(computeClient, allTenantsOpts).EachPage(func(page pagination.Page) (bool, error) { + allTenantsUsage, err := usage.ExtractAllTenants(page) + if err != nil { + return false, err + } + + fmt.Printf("%+v\n", allTenantsUsage) + + return true, nil + }) + + if err != nil { + panic(err) + } + +*/ +package usage diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/requests.go new file mode 100644 index 000000000000..77312227f6d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/requests.go @@ -0,0 +1,106 @@ +package usage + +import ( + "net/url" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SingleTenantOpts are options for fetching usage of a single tenant. +type SingleTenantOpts struct { + // The ending time to calculate usage statistics on compute and storage resources. + End *time.Time `q:"end"` + + // The beginning time to calculate usage statistics on compute and storage resources. + Start *time.Time `q:"start"` +} + +// SingleTenantOptsBuilder allows extensions to add additional parameters to the +// SingleTenant request. +type SingleTenantOptsBuilder interface { + ToUsageSingleTenantQuery() (string, error) +} + +// ToUsageSingleTenantQuery formats a SingleTenantOpts into a query string. +func (opts SingleTenantOpts) ToUsageSingleTenantQuery() (string, error) { + params := make(url.Values) + if opts.Start != nil { + params.Add("start", opts.Start.Format(gophercloud.RFC3339MilliNoZ)) + } + + if opts.End != nil { + params.Add("end", opts.End.Format(gophercloud.RFC3339MilliNoZ)) + } + + q := &url.URL{RawQuery: params.Encode()} + return q.String(), nil +} + +// SingleTenant returns usage data about a single tenant. 
+func SingleTenant(client *gophercloud.ServiceClient, tenantID string, opts SingleTenantOptsBuilder) pagination.Pager { + url := getTenantURL(client, tenantID) + if opts != nil { + query, err := opts.ToUsageSingleTenantQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SingleTenantPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// AllTenantsOpts are options for fetching usage of all tenants. +type AllTenantsOpts struct { + // Detailed will return detailed results. + Detailed bool + + // The ending time to calculate usage statistics on compute and storage resources. + End *time.Time `q:"end"` + + // The beginning time to calculate usage statistics on compute and storage resources. + Start *time.Time `q:"start"` +} + +// AllTenantsOptsBuilder allows extensions to add additional parameters to the +// AllTenants request. +type AllTenantsOptsBuilder interface { + ToUsageAllTenantsQuery() (string, error) +} + +// ToUsageAllTenantsQuery formats a AllTenantsOpts into a query string. +func (opts AllTenantsOpts) ToUsageAllTenantsQuery() (string, error) { + params := make(url.Values) + if opts.Start != nil { + params.Add("start", opts.Start.Format(gophercloud.RFC3339MilliNoZ)) + } + + if opts.End != nil { + params.Add("end", opts.End.Format(gophercloud.RFC3339MilliNoZ)) + } + + if opts.Detailed == true { + params.Add("detailed", "1") + } + + q := &url.URL{RawQuery: params.Encode()} + return q.String(), nil +} + +// AllTenants returns usage data about all tenants. +func AllTenants(client *gophercloud.ServiceClient, opts AllTenantsOptsBuilder) pagination.Pager { + url := allTenantsURL(client) + if opts != nil { + query, err := opts.ToUsageAllTenantsQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return AllTenantsPage{pagination.LinkedPageBase{PageResult: r}} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/results.go new file mode 100644 index 000000000000..f446730ec2e4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/results.go @@ -0,0 +1,183 @@ +package usage + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// TenantUsage is a set of usage information about a tenant over the sampling window +type TenantUsage struct { + // ServerUsages is an array of ServerUsage maps + ServerUsages []ServerUsage `json:"server_usages"` + + // Start is the beginning time to calculate usage statistics on compute and storage resources + Start time.Time `json:"-"` + + // Stop is the ending time to calculate usage statistics on compute and storage resources + Stop time.Time `json:"-"` + + // TenantID is the ID of the tenant whose usage is being reported on + TenantID string `json:"tenant_id"` + + // TotalHours is the total duration that servers exist (in hours) + TotalHours float64 `json:"total_hours"` + + // TotalLocalGBUsage multiplies the server disk size (in GiB) by hours the server exists, and then adding that all together for each server + TotalLocalGBUsage float64 `json:"total_local_gb_usage"` + + // TotalMemoryMBUsage multiplies the server memory size (in MB) by hours the 
server exists, and then adding that all together for each server + TotalMemoryMBUsage float64 `json:"total_memory_mb_usage"` + + // TotalVCPUsUsage multiplies the number of virtual CPUs of the server by hours the server exists, and then adding that all together for each server + TotalVCPUsUsage float64 `json:"total_vcpus_usage"` +} + +// UnmarshalJSON sets *u to a copy of data. +func (u *TenantUsage) UnmarshalJSON(b []byte) error { + type tmp TenantUsage + var s struct { + tmp + Start gophercloud.JSONRFC3339MilliNoZ `json:"start"` + Stop gophercloud.JSONRFC3339MilliNoZ `json:"stop"` + } + + if err := json.Unmarshal(b, &s); err != nil { + return err + } + *u = TenantUsage(s.tmp) + + u.Start = time.Time(s.Start) + u.Stop = time.Time(s.Stop) + + return nil +} + +// ServerUsage is a detailed set of information about a specific instance inside a tenant +type ServerUsage struct { + // EndedAt is the date and time when the server was deleted + EndedAt time.Time `json:"-"` + + // Flavor is the display name of a flavor + Flavor string `json:"flavor"` + + // Hours is the duration that the server exists in hours + Hours float64 `json:"hours"` + + // InstanceID is the UUID of the instance + InstanceID string `json:"instance_id"` + + // LocalGB is the sum of the root disk size of the server and the ephemeral disk size of it (in GiB) + LocalGB int `json:"local_gb"` + + // MemoryMB is the memory size of the server (in MB) + MemoryMB int `json:"memory_mb"` + + // Name is the name assigned to the server when it was created + Name string `json:"name"` + + // StartedAt is the date and time when the server was started + StartedAt time.Time `json:"-"` + + // State is the VM power state + State string `json:"state"` + + // TenantID is the UUID of the tenant in a multi-tenancy cloud + TenantID string `json:"tenant_id"` + + // Uptime is the uptime of the server in seconds + Uptime int `json:"uptime"` + + // VCPUs is the number of virtual CPUs that the server uses + VCPUs int `json:"vcpus"` +} + +// UnmarshalJSON sets *u to a copy of data. +func (u *ServerUsage) UnmarshalJSON(b []byte) error { + type tmp ServerUsage + var s struct { + tmp + EndedAt gophercloud.JSONRFC3339MilliNoZ `json:"ended_at"` + StartedAt gophercloud.JSONRFC3339MilliNoZ `json:"started_at"` + } + + if err := json.Unmarshal(b, &s); err != nil { + return err + } + *u = ServerUsage(s.tmp) + + u.EndedAt = time.Time(s.EndedAt) + u.StartedAt = time.Time(s.StartedAt) + + return nil +} + +// SingleTenantPage stores a single, only page of TenantUsage results from a +// SingleTenant call. +type SingleTenantPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a SingleTenantPage is empty. +func (r SingleTenantPage) IsEmpty() (bool, error) { + ks, err := ExtractSingleTenant(r) + return ks == nil, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r SingleTenantPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"tenant_usage_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractSingleTenant interprets a SingleTenantPage as a TenantUsage result. 
+func ExtractSingleTenant(page pagination.Page) (*TenantUsage, error) { + var s struct { + TenantUsage *TenantUsage `json:"tenant_usage"` + } + err := (page.(SingleTenantPage)).ExtractInto(&s) + return s.TenantUsage, err +} + +// AllTenantsPage stores a single, only page of TenantUsage results from a +// AllTenants call. +type AllTenantsPage struct { + pagination.LinkedPageBase +} + +// ExtractAllTenants interprets a AllTenantsPage as a TenantUsage result. +func ExtractAllTenants(page pagination.Page) ([]TenantUsage, error) { + var s struct { + TenantUsages []TenantUsage `json:"tenant_usages"` + } + err := (page.(AllTenantsPage)).ExtractInto(&s) + return s.TenantUsages, err +} + +// IsEmpty determines whether or not an AllTenantsPage is empty. +func (r AllTenantsPage) IsEmpty() (bool, error) { + usages, err := ExtractAllTenants(r) + return len(usages) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r AllTenantsPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"tenant_usages_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/doc.go new file mode 100644 index 000000000000..a3521795bb25 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/doc.go @@ -0,0 +1,2 @@ +// simple tenant usage unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/fixtures.go new file mode 100644 index 000000000000..3e37947a4e9e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/fixtures.go @@ -0,0 +1,314 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const FirstTenantID = "aabbccddeeff112233445566" +const SecondTenantID = "665544332211ffeeddccbbaa" + +// GetSingleTenant holds the fixtures for the content of the request for a +// single tenant. 
+const GetSingleTenant = `{ + "tenant_usage": { + "server_usages": [ + { + "ended_at": null, + "flavor": "m1.tiny", + "hours": 0.021675453333333334, + "instance_id": "a70096fd-8196-406b-86c4-045840f53ad7", + "local_gb": 1, + "memory_mb": 512, + "name": "jttest", + "started_at": "2017-11-30T03:23:43.000000", + "state": "active", + "tenant_id": "aabbccddeeff112233445566", + "uptime": 78, + "vcpus": 1 + }, + { + "ended_at": "2017-11-21T04:10:11.000000", + "flavor": "m1.acctest", + "hours": 0.33444444444444443, + "instance_id": "c04e38f2-dcee-4ca8-9466-7708d0a9b6dd", + "local_gb": 15, + "memory_mb": 512, + "name": "basic", + "started_at": "2017-11-21T03:50:07.000000", + "state": "terminated", + "tenant_id": "aabbccddeeff112233445566", + "uptime": 1204, + "vcpus": 1 + }, + { + "ended_at": "2017-11-30T03:21:21.000000", + "flavor": "m1.acctest", + "hours": 0.004166666666666667, + "instance_id": "ceb654fa-e0e8-44fb-8942-e4d0bfad3941", + "local_gb": 15, + "memory_mb": 512, + "name": "ACPTTESTJSxbPQAC34lTnBE1", + "started_at": "2017-11-30T03:21:06.000000", + "state": "terminated", + "tenant_id": "aabbccddeeff112233445566", + "uptime": 15, + "vcpus": 1 + } + ], + "start": "2017-11-02T03:25:01.000000", + "stop": "2017-11-30T03:25:01.000000", + "tenant_id": "aabbccddeeff112233445566", + "total_hours": 1.25834212, + "total_local_gb_usage": 18.571675453333334, + "total_memory_mb_usage": 644.27116544, + "total_vcpus_usage": 1.25834212 + } +}` + +// HandleGetSingleTenantSuccessfully configures the test server to respond to a +// Get request for a single tenant +func HandleGetSingleTenantSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-simple-tenant-usage/"+FirstTenantID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, GetSingleTenant) + }) +} + +// SingleTenantUsageResults is the code fixture for GetSingleTenant. +var SingleTenantUsageResults = usage.TenantUsage{ + ServerUsages: []usage.ServerUsage{ + { + Flavor: "m1.tiny", + Hours: 0.021675453333333334, + InstanceID: "a70096fd-8196-406b-86c4-045840f53ad7", + LocalGB: 1, + MemoryMB: 512, + Name: "jttest", + StartedAt: time.Date(2017, 11, 30, 3, 23, 43, 0, time.UTC), + State: "active", + TenantID: "aabbccddeeff112233445566", + Uptime: 78, + VCPUs: 1, + }, + { + Flavor: "m1.acctest", + Hours: 0.33444444444444443, + InstanceID: "c04e38f2-dcee-4ca8-9466-7708d0a9b6dd", + LocalGB: 15, + MemoryMB: 512, + Name: "basic", + StartedAt: time.Date(2017, 11, 21, 3, 50, 7, 0, time.UTC), + EndedAt: time.Date(2017, 11, 21, 4, 10, 11, 0, time.UTC), + State: "terminated", + TenantID: "aabbccddeeff112233445566", + Uptime: 1204, + VCPUs: 1, + }, + { + Flavor: "m1.acctest", + Hours: 0.004166666666666667, + InstanceID: "ceb654fa-e0e8-44fb-8942-e4d0bfad3941", + LocalGB: 15, + MemoryMB: 512, + Name: "ACPTTESTJSxbPQAC34lTnBE1", + StartedAt: time.Date(2017, 11, 30, 3, 21, 6, 0, time.UTC), + EndedAt: time.Date(2017, 11, 30, 3, 21, 21, 0, time.UTC), + State: "terminated", + TenantID: "aabbccddeeff112233445566", + Uptime: 15, + VCPUs: 1, + }, + }, + Start: time.Date(2017, 11, 2, 3, 25, 1, 0, time.UTC), + Stop: time.Date(2017, 11, 30, 3, 25, 1, 0, time.UTC), + TenantID: "aabbccddeeff112233445566", + TotalHours: 1.25834212, + TotalLocalGBUsage: 18.571675453333334, + TotalMemoryMBUsage: 644.27116544, + TotalVCPUsUsage: 1.25834212, +} + +// GetAllTenants holds the fixtures for the content of the request for +// all tenants. 
+const GetAllTenants = `{ + "tenant_usages": [ + { + "server_usages": [ + { + "ended_at": null, + "flavor": "m1.tiny", + "hours": 0.021675453333333334, + "instance_id": "a70096fd-8196-406b-86c4-045840f53ad7", + "local_gb": 1, + "memory_mb": 512, + "name": "jttest", + "started_at": "2017-11-30T03:23:43.000000", + "state": "active", + "tenant_id": "aabbccddeeff112233445566", + "uptime": 78, + "vcpus": 1 + }, + { + "ended_at": "2017-11-21T04:10:11.000000", + "flavor": "m1.acctest", + "hours": 0.33444444444444443, + "instance_id": "c04e38f2-dcee-4ca8-9466-7708d0a9b6dd", + "local_gb": 15, + "memory_mb": 512, + "name": "basic", + "started_at": "2017-11-21T03:50:07.000000", + "state": "terminated", + "tenant_id": "aabbccddeeff112233445566", + "uptime": 1204, + "vcpus": 1 + }, + { + "ended_at": "2017-11-30T03:21:21.000000", + "flavor": "m1.acctest", + "hours": 0.004166666666666667, + "instance_id": "ceb654fa-e0e8-44fb-8942-e4d0bfad3941", + "local_gb": 15, + "memory_mb": 512, + "name": "ACPTTESTJSxbPQAC34lTnBE1", + "started_at": "2017-11-30T03:21:06.000000", + "state": "terminated", + "tenant_id": "aabbccddeeff112233445566", + "uptime": 15, + "vcpus": 1 + } + ], + "start": "2017-11-02T03:25:01.000000", + "stop": "2017-11-30T03:25:01.000000", + "tenant_id": "aabbccddeeff112233445566", + "total_hours": 1.25834212, + "total_local_gb_usage": 18.571675453333334, + "total_memory_mb_usage": 644.27116544, + "total_vcpus_usage": 1.25834212 + }, + { + "server_usages": [ + { + "ended_at": null, + "flavor": "m1.tiny", + "hours": 0.021675453333333334, + "instance_id": "a70096fd-8196-406b-86c4-045840f53ad7", + "local_gb": 1, + "memory_mb": 512, + "name": "test", + "started_at": "2017-11-30T03:23:43.000000", + "state": "active", + "tenant_id": "665544332211ffeeddccbbaa", + "uptime": 78, + "vcpus": 1 + } + ], + "start": "2017-11-02T03:25:01.000000", + "stop": "2017-11-30T03:25:01.000000", + "tenant_id": "665544332211ffeeddccbbaa", + "total_hours": 0.021675453333333334, + "total_local_gb_usage": 18.571675453333334, + "total_memory_mb_usage": 644.27116544, + "total_vcpus_usage": 1.25834212 + } + ] +}` + +// HandleGetAllTenantsSuccessfully configures the test server to respond to a +// Get request for all tenants. +func HandleGetAllTenantsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/os-simple-tenant-usage", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Content-Type", "application/json") + fmt.Fprint(w, GetAllTenants) + }) +} + +// AllTenantsUsageResult is the code fixture for GetAllTenants. 
+var AllTenantsUsageResult = []usage.TenantUsage{ + { + ServerUsages: []usage.ServerUsage{ + { + Flavor: "m1.tiny", + Hours: 0.021675453333333334, + InstanceID: "a70096fd-8196-406b-86c4-045840f53ad7", + LocalGB: 1, + MemoryMB: 512, + Name: "jttest", + StartedAt: time.Date(2017, 11, 30, 3, 23, 43, 0, time.UTC), + State: "active", + TenantID: "aabbccddeeff112233445566", + Uptime: 78, + VCPUs: 1, + }, + { + Flavor: "m1.acctest", + Hours: 0.33444444444444443, + InstanceID: "c04e38f2-dcee-4ca8-9466-7708d0a9b6dd", + LocalGB: 15, + MemoryMB: 512, + Name: "basic", + StartedAt: time.Date(2017, 11, 21, 3, 50, 7, 0, time.UTC), + EndedAt: time.Date(2017, 11, 21, 4, 10, 11, 0, time.UTC), + State: "terminated", + TenantID: "aabbccddeeff112233445566", + Uptime: 1204, + VCPUs: 1, + }, + { + Flavor: "m1.acctest", + Hours: 0.004166666666666667, + InstanceID: "ceb654fa-e0e8-44fb-8942-e4d0bfad3941", + LocalGB: 15, + MemoryMB: 512, + Name: "ACPTTESTJSxbPQAC34lTnBE1", + StartedAt: time.Date(2017, 11, 30, 3, 21, 6, 0, time.UTC), + EndedAt: time.Date(2017, 11, 30, 3, 21, 21, 0, time.UTC), + State: "terminated", + TenantID: "aabbccddeeff112233445566", + Uptime: 15, + VCPUs: 1, + }, + }, + Start: time.Date(2017, 11, 2, 3, 25, 1, 0, time.UTC), + Stop: time.Date(2017, 11, 30, 3, 25, 1, 0, time.UTC), + TenantID: "aabbccddeeff112233445566", + TotalHours: 1.25834212, + TotalLocalGBUsage: 18.571675453333334, + TotalMemoryMBUsage: 644.27116544, + TotalVCPUsUsage: 1.25834212, + }, + { + ServerUsages: []usage.ServerUsage{ + { + Flavor: "m1.tiny", + Hours: 0.021675453333333334, + InstanceID: "a70096fd-8196-406b-86c4-045840f53ad7", + LocalGB: 1, + MemoryMB: 512, + Name: "test", + StartedAt: time.Date(2017, 11, 30, 3, 23, 43, 0, time.UTC), + State: "active", + TenantID: "665544332211ffeeddccbbaa", + Uptime: 78, + VCPUs: 1, + }, + }, + Start: time.Date(2017, 11, 2, 3, 25, 1, 0, time.UTC), + Stop: time.Date(2017, 11, 30, 3, 25, 1, 0, time.UTC), + TenantID: "665544332211ffeeddccbbaa", + TotalHours: 0.021675453333333334, + TotalLocalGBUsage: 18.571675453333334, + TotalMemoryMBUsage: 644.27116544, + TotalVCPUsUsage: 1.25834212, + }, +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/requests_test.go new file mode 100644 index 000000000000..1b4f27694c93 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/testing/requests_test.go @@ -0,0 +1,52 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetTenant(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSingleTenantSuccessfully(t) + + count := 0 + err := usage.SingleTenant(client.ServiceClient(), FirstTenantID, nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := usage.ExtractSingleTenant(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &SingleTenantUsageResults, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestAllTenants(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetAllTenantsSuccessfully(t) + + getOpts := usage.AllTenantsOpts{ + Detailed: true, + } + + count := 0 + err := 
usage.AllTenants(client.ServiceClient(), getOpts).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := usage.ExtractAllTenants(page) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, AllTenantsUsageResult, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/urls.go new file mode 100644 index 000000000000..506361070101 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/usage/urls.go @@ -0,0 +1,13 @@ +package usage + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-simple-tenant-usage" + +func allTenantsURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(resourcePath) +} + +func getTenantURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL(resourcePath, tenantID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go new file mode 100644 index 000000000000..484eb20000c0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/doc.go @@ -0,0 +1,30 @@ +/* +Package volumeattach provides the ability to attach and detach volumes +from servers. + +Example to Attach a Volume + + serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" + volumeID := "87463836-f0e2-4029-abf6-20c8892a3103" + + createOpts := volumeattach.CreateOpts{ + Device: "/dev/vdc", + VolumeID: volumeID, + } + + result, err := volumeattach.Create(computeClient, serverID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Detach a Volume + + serverID := "7ac8686c-de71-4acb-9600-ec18b1a1ed6d" + attachmentID := "ed081613-1c9b-4231-aa5e-ebfd4d87f983" + + err := volumeattach.Delete(computeClient, serverID, attachmentID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package volumeattach diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go new file mode 100644 index 000000000000..6a262c212e12 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/requests.go @@ -0,0 +1,60 @@ +package volumeattach + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List returns a Pager that allows you to iterate over a collection of +// VolumeAttachments. +func List(client *gophercloud.ServiceClient, serverID string) pagination.Pager { + return pagination.NewPager(client, listURL(client, serverID), func(r pagination.PageResult) pagination.Page { + return VolumeAttachmentPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder allows extensions to add parameters to the Create request. +type CreateOptsBuilder interface { + ToVolumeAttachmentCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies volume attachment creation or import parameters. +type CreateOpts struct { + // Device is the device that the volume will attach to the instance as. + // Omit for "auto". + Device string `json:"device,omitempty"` + + // VolumeID is the ID of the volume to attach to the instance. 
+ VolumeID string `json:"volumeId" required:"true"` +} + +// ToVolumeAttachmentCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToVolumeAttachmentCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "volumeAttachment") +} + +// Create requests the creation of a new volume attachment on the server. +func Create(client *gophercloud.ServiceClient, serverID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToVolumeAttachmentCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client, serverID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get returns public data about a previously created VolumeAttachment. +func Get(client *gophercloud.ServiceClient, serverID, attachmentID string) (r GetResult) { + _, r.Err = client.Get(getURL(client, serverID, attachmentID), &r.Body, nil) + return +} + +// Delete requests the deletion of a previously stored VolumeAttachment from +// the server. +func Delete(client *gophercloud.ServiceClient, serverID, attachmentID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, serverID, attachmentID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go new file mode 100644 index 000000000000..56d503472911 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/results.go @@ -0,0 +1,77 @@ +package volumeattach + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// VolumeAttachment contains attachment information between a volume +// and server. +type VolumeAttachment struct { + // ID is a unique id of the attachment. + ID string `json:"id"` + + // Device is what device the volume is attached as. + Device string `json:"device"` + + // VolumeID is the ID of the attached volume. + VolumeID string `json:"volumeId"` + + // ServerID is the ID of the instance that has the volume attached. + ServerID string `json:"serverId"` +} + +// VolumeAttachmentPage stores a single page of all VolumeAttachment +// results from a List call. +type VolumeAttachmentPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a VolumeAttachmentPage is empty. +func (page VolumeAttachmentPage) IsEmpty() (bool, error) { + va, err := ExtractVolumeAttachments(page) + return len(va) == 0, err +} + +// ExtractVolumeAttachments interprets a page of results as a slice of +// VolumeAttachment. +func ExtractVolumeAttachments(r pagination.Page) ([]VolumeAttachment, error) { + var s struct { + VolumeAttachments []VolumeAttachment `json:"volumeAttachments"` + } + err := (r.(VolumeAttachmentPage)).ExtractInto(&s) + return s.VolumeAttachments, err +} + +// VolumeAttachmentResult is the result from a volume attachment operation. +type VolumeAttachmentResult struct { + gophercloud.Result +} + +// Extract is a method that attempts to interpret any VolumeAttachment resource +// response as a VolumeAttachment struct. +func (r VolumeAttachmentResult) Extract() (*VolumeAttachment, error) { + var s struct { + VolumeAttachment *VolumeAttachment `json:"volumeAttachment"` + } + err := r.ExtractInto(&s) + return s.VolumeAttachment, err +} + +// CreateResult is the response from a Create operation.
Call its Extract method +// to interpret it as a VolumeAttachment. +type CreateResult struct { + VolumeAttachmentResult +} + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as a VolumeAttachment. +type GetResult struct { + VolumeAttachmentResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go new file mode 100644 index 000000000000..11dfc06942d5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/doc.go @@ -0,0 +1,2 @@ +// volumeattach unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go new file mode 100644 index 000000000000..4f996106e43a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/fixtures.go @@ -0,0 +1,108 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput is a sample response to a List call. +const ListOutput = ` +{ + "volumeAttachments": [ + { + "device": "/dev/vdd", + "id": "a26887c6-c47b-4654-abb5-dfadf7d3f803", + "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f803" + }, + { + "device": "/dev/vdc", + "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804", + "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804" + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "volumeAttachment": { + "device": "/dev/vdc", + "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804", + "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804" + } +} +` + +// CreateOutput is a sample response to a Create call. +const CreateOutput = ` +{ + "volumeAttachment": { + "device": "/dev/vdc", + "id": "a26887c6-c47b-4654-abb5-dfadf7d3f804", + "serverId": "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804" + } +} +` + +// HandleListSuccessfully configures the test server to respond to a List request. 
+func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request +// for an existing attachment +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments/a26887c6-c47b-4654-abb5-dfadf7d3f804", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request +// for a new attachment +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` +{ + "volumeAttachment": { + "volumeId": "a26887c6-c47b-4654-abb5-dfadf7d3f804", + "device": "/dev/vdc" + } +} +`) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateOutput) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request for +// an existing attachment +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/4d8c3732-a248-40ed-bebc-539a6ffd25c0/os-volume_attachments/a26887c6-c47b-4654-abb5-dfadf7d3f804", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/requests_test.go new file mode 100644 index 000000000000..9486f9bb5617 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/testing/requests_test.go @@ -0,0 +1,102 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// FirstVolumeAttachment is the first result in ListOutput. +var FirstVolumeAttachment = volumeattach.VolumeAttachment{ + Device: "/dev/vdd", + ID: "a26887c6-c47b-4654-abb5-dfadf7d3f803", + ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f803", +} + +// SecondVolumeAttachment is the second result in ListOutput. +var SecondVolumeAttachment = volumeattach.VolumeAttachment{ + Device: "/dev/vdc", + ID: "a26887c6-c47b-4654-abb5-dfadf7d3f804", + ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804", +} + +// ExpectedVolumeAttachmentSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. 
+var ExpectedVolumeAttachmentSlice = []volumeattach.VolumeAttachment{FirstVolumeAttachment, SecondVolumeAttachment} + +//CreatedVolumeAttachment is the parsed result from CreatedOutput. +var CreatedVolumeAttachment = volumeattach.VolumeAttachment{ + Device: "/dev/vdc", + ID: "a26887c6-c47b-4654-abb5-dfadf7d3f804", + ServerID: "4d8c3732-a248-40ed-bebc-539a6ffd25c0", + VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804", +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListSuccessfully(t) + + serverID := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + + count := 0 + err := volumeattach.List(client.ServiceClient(), serverID).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := volumeattach.ExtractVolumeAttachments(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedVolumeAttachmentSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateSuccessfully(t) + + serverID := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + + actual, err := volumeattach.Create(client.ServiceClient(), serverID, volumeattach.CreateOpts{ + Device: "/dev/vdc", + VolumeID: "a26887c6-c47b-4654-abb5-dfadf7d3f804", + }).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedVolumeAttachment, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetSuccessfully(t) + + aID := "a26887c6-c47b-4654-abb5-dfadf7d3f804" + serverID := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + + actual, err := volumeattach.Get(client.ServiceClient(), serverID, aID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &SecondVolumeAttachment, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteSuccessfully(t) + + aID := "a26887c6-c47b-4654-abb5-dfadf7d3f804" + serverID := "4d8c3732-a248-40ed-bebc-539a6ffd25c0" + + err := volumeattach.Delete(client.ServiceClient(), serverID, aID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go new file mode 100644 index 000000000000..083f8dc4554f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach/urls.go @@ -0,0 +1,25 @@ +package volumeattach + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "os-volume_attachments" + +func resourceURL(c *gophercloud.ServiceClient, serverID string) string { + return c.ServiceURL("servers", serverID, resourcePath) +} + +func listURL(c *gophercloud.ServiceClient, serverID string) string { + return resourceURL(c, serverID) +} + +func createURL(c *gophercloud.ServiceClient, serverID string) string { + return resourceURL(c, serverID) +} + +func getURL(c *gophercloud.ServiceClient, serverID, aID string) string { + return c.ServiceURL("servers", serverID, resourcePath, aID) +} + +func deleteURL(c *gophercloud.ServiceClient, serverID, aID string) string { + return getURL(c, serverID, aID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go new file mode 100644 index 000000000000..34d8764fadb2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/doc.go @@ -0,0 +1,137 
@@ +/* +Package flavors provides information and interaction with the flavor API +in the OpenStack Compute service. + +A flavor is an available hardware configuration for a server. Each flavor +has a unique combination of disk space, memory capacity and priority for CPU +time. + +Example to List Flavors + + listOpts := flavors.ListOpts{ + AccessType: flavors.PublicAccess, + } + + allPages, err := flavors.ListDetail(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFlavors, err := flavors.ExtractFlavors(allPages) + if err != nil { + panic(err) + } + + for _, flavor := range allFlavors { + fmt.Printf("%+v\n", flavor) + } + +Example to Create a Flavor + + createOpts := flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: gophercloud.IntToPointer(1), + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + + flavor, err := flavors.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to List Flavor Access + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + allPages, err := flavors.ListAccesses(computeClient, flavorID).AllPages() + if err != nil { + panic(err) + } + + allAccesses, err := flavors.ExtractAccesses(allPages) + if err != nil { + panic(err) + } + + for _, access := range allAccesses { + fmt.Printf("%+v", access) + } + +Example to Grant Access to a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + accessOpts := flavors.AddAccessOpts{ + Tenant: "15153a0979884b59b0592248ef947921", + } + + accessList, err := flavors.AddAccess(computeClient, flavor.ID, accessOpts).Extract() + if err != nil { + panic(err) + } + +Example to Remove/Revoke Access to a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + accessOpts := flavors.RemoveAccessOpts{ + Tenant: "15153a0979884b59b0592248ef947921", + } + + accessList, err := flavors.RemoveAccess(computeClient, flavor.ID, accessOpts).Extract() + if err != nil { + panic(err) + } + +Example to Create Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + createOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY", + } + createdExtraSpecs, err := flavors.CreateExtraSpecs(computeClient, flavorID, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", createdExtraSpecs) + +Example to Get Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + extraSpecs, err := flavors.ListExtraSpecs(computeClient, flavorID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", extraSpecs) + +Example to Update Extra Specs for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + + updateOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_thread_policy": "CPU-THREAD-POLICY-UPDATED", + } + updatedExtraSpec, err := flavors.UpdateExtraSpec(computeClient, flavorID, updateOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v", updatedExtraSpec) + +Example to Delete an Extra Spec for a Flavor + + flavorID := "e91758d6-a54a-4778-ad72-0c73a1cb695b" + err := flavors.DeleteExtraSpec(computeClient, flavorID, "hw:cpu_thread_policy").ExtractErr() + if err != nil { + panic(err) + } +*/ +package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go new file mode 100644 index 000000000000..539019e90d37 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/requests.go @@ -0,0 +1,357 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToFlavorListQuery() (string, error) +} + +/* + AccessType maps to OpenStack's Flavor.is_public field. Although the is_public + field is boolean, the request options are ternary, which is why AccessType is + a string. The following values are allowed: + + The AccessType arguement is optional, and if it is not supplied, OpenStack + returns the PublicAccess flavors. +*/ +type AccessType string + +const ( + // PublicAccess returns public flavors and private flavors associated with + // that project. + PublicAccess AccessType = "true" + + // PrivateAccess (admin only) returns private flavors, across all projects. + PrivateAccess AccessType = "false" + + // AllAccess (admin only) returns public and private flavors across all + // projects. + AllAccess AccessType = "None" +) + +/* + ListOpts filters the results returned by the List() function. + For example, a flavor with a minDisk field of 10 will not be returned if you + specify MinDisk set to 20. + + Typically, software will use the last ID of the previous call to List to set + the Marker for the current call. +*/ +type ListOpts struct { + // ChangesSince, if provided, instructs List to return only those things which + // have changed since the timestamp provided. + ChangesSince string `q:"changes-since"` + + // MinDisk and MinRAM, if provided, elides flavors which do not meet your + // criteria. + MinDisk int `q:"minDisk"` + MinRAM int `q:"minRam"` + + // SortDir allows to select sort direction. + // It can be "asc" or "desc" (default). + SortDir string `q:"sort_dir"` + + // SortKey allows to sort by one of the flavors attributes. + // Default is flavorid. + SortKey string `q:"sort_key"` + + // Marker and Limit control paging. + // Marker instructs List where to start listing from. + Marker string `q:"marker"` + + // Limit instructs List to refrain from sending excessively large lists of + // flavors. + Limit int `q:"limit"` + + // AccessType, if provided, instructs List which set of flavors to return. + // If IsPublic not provided, flavors for the current project are returned. + AccessType AccessType `q:"is_public"` +} + +// ToFlavorListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToFlavorListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail instructs OpenStack to provide a list of flavors. +// You may provide criteria by which List curtails its results for easier +// processing. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToFlavorListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type CreateOptsBuilder interface { + ToFlavorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters used for creating a flavor. +type CreateOpts struct { + // Name is the name of the flavor. + Name string `json:"name" required:"true"` + + // RAM is the memory of the flavor, measured in MB. 
+ RAM int `json:"ram" required:"true"` + + // VCPUs is the number of vcpus for the flavor. + VCPUs int `json:"vcpus" required:"true"` + + // Disk the amount of root disk space, measured in GB. + Disk *int `json:"disk" required:"true"` + + // ID is a unique ID for the flavor. + ID string `json:"id,omitempty"` + + // Swap is the amount of swap space for the flavor, measured in MB. + Swap *int `json:"swap,omitempty"` + + // RxTxFactor alters the network bandwidth of a flavor. + RxTxFactor float64 `json:"rxtx_factor,omitempty"` + + // IsPublic flags a flavor as being available to all projects or not. + IsPublic *bool `json:"os-flavor-access:is_public,omitempty"` + + // Ephemeral is the amount of ephemeral disk space, measured in GB. + Ephemeral *int `json:"OS-FLV-EXT-DATA:ephemeral,omitempty"` +} + +// ToFlavorCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToFlavorCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "flavor") +} + +// Create requests the creation of a new flavor. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFlavorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get retrieves details of a single flavor. Use ExtractFlavor to convert its +// result into a Flavor. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified flavor ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListAccesses retrieves the tenants which have access to a flavor. +func ListAccesses(client *gophercloud.ServiceClient, id string) pagination.Pager { + url := accessURL(client, id) + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return AccessPage{pagination.SinglePageBase(r)} + }) +} + +// AddAccessOptsBuilder allows extensions to add additional parameters to the +// AddAccess requests. +type AddAccessOptsBuilder interface { + ToFlavorAddAccessMap() (map[string]interface{}, error) +} + +// AddAccessOpts represents options for adding access to a flavor. +type AddAccessOpts struct { + // Tenant is the project/tenant ID to grant access. + Tenant string `json:"tenant"` +} + +// ToFlavorAddAccessMap constructs a request body from AddAccessOpts. +func (opts AddAccessOpts) ToFlavorAddAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addTenantAccess") +} + +// AddAccess grants a tenant/project access to a flavor. +func AddAccess(client *gophercloud.ServiceClient, id string, opts AddAccessOptsBuilder) (r AddAccessResult) { + b, err := opts.ToFlavorAddAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveAccessOptsBuilder allows extensions to add additional parameters to the +// RemoveAccess requests. +type RemoveAccessOptsBuilder interface { + ToFlavorRemoveAccessMap() (map[string]interface{}, error) +} + +// RemoveAccessOpts represents options for removing access to a flavor. +type RemoveAccessOpts struct { + // Tenant is the project/tenant ID to grant access. 
+ Tenant string `json:"tenant"` +} + +// ToFlavorRemoveAccessMap constructs a request body from RemoveAccessOpts. +func (opts RemoveAccessOpts) ToFlavorRemoveAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "removeTenantAccess") +} + +// RemoveAccess removes/revokes a tenant/project access to a flavor. +func RemoveAccess(client *gophercloud.ServiceClient, id string, opts RemoveAccessOptsBuilder) (r RemoveAccessResult) { + b, err := opts.ToFlavorRemoveAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(accessActionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ExtraSpecs requests all the extra-specs for the given flavor ID. +func ListExtraSpecs(client *gophercloud.ServiceClient, flavorID string) (r ListExtraSpecsResult) { + _, r.Err = client.Get(extraSpecsListURL(client, flavorID), &r.Body, nil) + return +} + +func GetExtraSpec(client *gophercloud.ServiceClient, flavorID string, key string) (r GetExtraSpecResult) { + _, r.Err = client.Get(extraSpecsGetURL(client, flavorID, key), &r.Body, nil) + return +} + +// CreateExtraSpecsOptsBuilder allows extensions to add additional parameters to the +// CreateExtraSpecs requests. +type CreateExtraSpecsOptsBuilder interface { + ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error) +} + +// ExtraSpecsOpts is a map that contains key-value pairs. +type ExtraSpecsOpts map[string]string + +// ToFlavorExtraSpecsCreateMap assembles a body for a Create request based on +// the contents of ExtraSpecsOpts. +func (opts ExtraSpecsOpts) ToFlavorExtraSpecsCreateMap() (map[string]interface{}, error) { + return map[string]interface{}{"extra_specs": opts}, nil +} + +// CreateExtraSpecs will create or update the extra-specs key-value pairs for +// the specified Flavor. +func CreateExtraSpecs(client *gophercloud.ServiceClient, flavorID string, opts CreateExtraSpecsOptsBuilder) (r CreateExtraSpecsResult) { + b, err := opts.ToFlavorExtraSpecsCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(extraSpecsCreateURL(client, flavorID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// UpdateExtraSpecOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateExtraSpecOptsBuilder interface { + ToFlavorExtraSpecUpdateMap() (map[string]string, string, error) +} + +// ToFlavorExtraSpecUpdateMap assembles a body for an Update request based on +// the contents of a ExtraSpecOpts. +func (opts ExtraSpecsOpts) ToFlavorExtraSpecUpdateMap() (map[string]string, string, error) { + if len(opts) != 1 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "flavors.ExtraSpecOpts" + err.Info = "Must have 1 and only one key-value pair" + return nil, "", err + } + + var key string + for k := range opts { + key = k + } + + return opts, key, nil +} + +// UpdateExtraSpec will updates the value of the specified flavor's extra spec +// for the key in opts. +func UpdateExtraSpec(client *gophercloud.ServiceClient, flavorID string, opts UpdateExtraSpecOptsBuilder) (r UpdateExtraSpecResult) { + b, key, err := opts.ToFlavorExtraSpecUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(extraSpecUpdateURL(client, flavorID, key), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// DeleteExtraSpec will delete the key-value pair with the given key for the given +// flavor ID. 
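+//
+// A minimal usage sketch, matching the example in doc.go (computeClient and
+// flavorID are assumed to be an initialized compute v2 ServiceClient and an
+// existing flavor ID):
+//
+//	err := flavors.DeleteExtraSpec(computeClient, flavorID, "hw:cpu_policy").ExtractErr()
+//	if err != nil {
+//		// the extra spec could not be deleted
+//	}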
+func DeleteExtraSpec(client *gophercloud.ServiceClient, flavorID, key string) (r DeleteExtraSpecResult) { + _, r.Err = client.Delete(extraSpecDeleteURL(client, flavorID, key), &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// IDFromName is a convienience function that returns a flavor's ID given its +// name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + allPages, err := ListDetail(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractFlavors(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + err := &gophercloud.ErrResourceNotFound{} + err.ResourceType = "flavor" + err.Name = name + return "", err + case 1: + return id, nil + default: + err := &gophercloud.ErrMultipleResourcesFound{} + err.ResourceType = "flavor" + err.Name = name + err.Count = count + return "", err + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go new file mode 100644 index 000000000000..92fe1b1809d4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/results.go @@ -0,0 +1,252 @@ +package flavors + +import ( + "encoding/json" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Get operations. Call its Extract method to +// interpret it as a Flavor. +type CreateResult struct { + commonResult +} + +// GetResult is the response of a Get operations. Call its Extract method to +// interpret it as a Flavor. +type GetResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract provides access to the individual Flavor returned by the Get and +// Create functions. +func (r commonResult) Extract() (*Flavor, error) { + var s struct { + Flavor *Flavor `json:"flavor"` + } + err := r.ExtractInto(&s) + return s.Flavor, err +} + +// Flavor represent (virtual) hardware configurations for server resources +// in a region. +type Flavor struct { + // ID is the flavor's unique ID. + ID string `json:"id"` + + // Disk is the amount of root disk, measured in GB. + Disk int `json:"disk"` + + // RAM is the amount of memory, measured in MB. + RAM int `json:"ram"` + + // Name is the name of the flavor. + Name string `json:"name"` + + // RxTxFactor describes bandwidth alterations of the flavor. + RxTxFactor float64 `json:"rxtx_factor"` + + // Swap is the amount of swap space, measured in MB. + Swap int `json:"-"` + + // VCPUs indicates how many (virtual) CPUs are available for this flavor. + VCPUs int `json:"vcpus"` + + // IsPublic indicates whether the flavor is public. + IsPublic bool `json:"os-flavor-access:is_public"` + + // Ephemeral is the amount of ephemeral disk space, measured in GB. 
+ Ephemeral int `json:"OS-FLV-EXT-DATA:ephemeral"` +} + +func (r *Flavor) UnmarshalJSON(b []byte) error { + type tmp Flavor + var s struct { + tmp + Swap interface{} `json:"swap"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Flavor(s.tmp) + + switch t := s.Swap.(type) { + case float64: + r.Swap = int(t) + case string: + switch t { + case "": + r.Swap = 0 + default: + swap, err := strconv.ParseFloat(t, 64) + if err != nil { + return err + } + r.Swap = int(swap) + } + } + + return nil +} + +// FlavorPage contains a single page of all flavors from a ListDetails call. +type FlavorPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a FlavorPage contains any results. +func (page FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(page) + return len(flavors) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (page FlavorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"flavors_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractFlavors provides access to the list of flavors in a page acquired +// from the ListDetail operation. +func ExtractFlavors(r pagination.Page) ([]Flavor, error) { + var s struct { + Flavors []Flavor `json:"flavors"` + } + err := (r.(FlavorPage)).ExtractInto(&s) + return s.Flavors, err +} + +// AccessPage contains a single page of all FlavorAccess entries for a flavor. +type AccessPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether an AccessPage is empty. +func (page AccessPage) IsEmpty() (bool, error) { + v, err := ExtractAccesses(page) + return len(v) == 0, err +} + +// ExtractAccesses interprets a page of results as a slice of FlavorAccess. +func ExtractAccesses(r pagination.Page) ([]FlavorAccess, error) { + var s struct { + FlavorAccesses []FlavorAccess `json:"flavor_access"` + } + err := (r.(AccessPage)).ExtractInto(&s) + return s.FlavorAccesses, err +} + +type accessResult struct { + gophercloud.Result +} + +// AddAccessResult is the response of an AddAccess operation. Call its +// Extract method to interpret it as a slice of FlavorAccess. +type AddAccessResult struct { + accessResult +} + +// RemoveAccessResult is the response of a RemoveAccess operation. Call its +// Extract method to interpret it as a slice of FlavorAccess. +type RemoveAccessResult struct { + accessResult +} + +// Extract provides access to the result of an access create or delete. +// The result will be all accesses that the flavor has. +func (r accessResult) Extract() ([]FlavorAccess, error) { + var s struct { + FlavorAccesses []FlavorAccess `json:"flavor_access"` + } + err := r.ExtractInto(&s) + return s.FlavorAccesses, err +} + +// FlavorAccess represents an ACL of tenant access to a specific Flavor. +type FlavorAccess struct { + // FlavorID is the unique ID of the flavor. + FlavorID string `json:"flavor_id"` + + // TenantID is the unique ID of the tenant. + TenantID string `json:"tenant_id"` +} + +// Extract interprets any extraSpecsResult as ExtraSpecs, if possible. +func (r extraSpecsResult) Extract() (map[string]string, error) { + var s struct { + ExtraSpecs map[string]string `json:"extra_specs"` + } + err := r.ExtractInto(&s) + return s.ExtraSpecs, err +} + +// extraSpecsResult contains the result of a call for (potentially) multiple +// key-value pairs. 
Call its Extract method to interpret it as a +// map[string]interface. +type extraSpecsResult struct { + gophercloud.Result +} + +// ListExtraSpecsResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type ListExtraSpecsResult struct { + extraSpecsResult +} + +// CreateExtraSpecResult contains the result of a Create operation. Call its +// Extract method to interpret it as a map[string]interface. +type CreateExtraSpecsResult struct { + extraSpecsResult +} + +// extraSpecResult contains the result of a call for individual a single +// key-value pair. +type extraSpecResult struct { + gophercloud.Result +} + +// GetExtraSpecResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetExtraSpecResult struct { + extraSpecResult +} + +// UpdateExtraSpecResult contains the result of an Update operation. Call its +// Extract method to interpret it as a map[string]interface. +type UpdateExtraSpecResult struct { + extraSpecResult +} + +// DeleteExtraSpecResult contains the result of a Delete operation. Call its +// ExtractErr method to determine if the call succeeded or failed. +type DeleteExtraSpecResult struct { + gophercloud.ErrResult +} + +// Extract interprets any extraSpecResult as an ExtraSpec, if possible. +func (r extraSpecResult) Extract() (map[string]string, error) { + var s map[string]string + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/doc.go new file mode 100644 index 000000000000..c27087b5665e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/doc.go @@ -0,0 +1,2 @@ +// flavors unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/fixtures.go new file mode 100644 index 000000000000..445f769b2b09 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/fixtures.go @@ -0,0 +1,116 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ExtraSpecsGetBody provides a GET result of the extra_specs for a flavor +const ExtraSpecsGetBody = ` +{ + "extra_specs" : { + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY" + } +} +` + +// GetExtraSpecBody provides a GET result of a particular extra_spec for a flavor +const GetExtraSpecBody = ` +{ + "hw:cpu_policy": "CPU-POLICY" +} +` + +// UpdatedExtraSpecBody provides an PUT result of a particular updated extra_spec for a flavor +const UpdatedExtraSpecBody = ` +{ + "hw:cpu_policy": "CPU-POLICY-2" +} +` + +// ExtraSpecs is the expected extra_specs returned from GET on a flavor's extra_specs +var ExtraSpecs = map[string]string{ + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY", +} + +// ExtraSpec is the expected extra_spec returned from GET on a flavor's extra_specs +var ExtraSpec = map[string]string{ + "hw:cpu_policy": "CPU-POLICY", +} + +// UpdatedExtraSpec is the expected extra_spec returned from PUT on a flavor's extra_specs +var UpdatedExtraSpec = map[string]string{ + "hw:cpu_policy": "CPU-POLICY-2", +} + +func 
HandleExtraSpecsListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/1/os-extra_specs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ExtraSpecsGetBody) + }) +} + +func HandleExtraSpecGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/1/os-extra_specs/hw:cpu_policy", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetExtraSpecBody) + }) +} + +func HandleExtraSpecsCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/1/os-extra_specs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, `{ + "extra_specs": { + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY" + } + }`) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ExtraSpecsGetBody) + }) +} + +func HandleExtraSpecUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/1/os-extra_specs/hw:cpu_policy", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, `{ + "hw:cpu_policy": "CPU-POLICY-2" + }`) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdatedExtraSpecBody) + }) +} + +func HandleExtraSpecDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/flavors/1/os-extra_specs/hw:cpu_policy", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusOK) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/requests_test.go new file mode 100644 index 000000000000..fba0d4776b3f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/testing/requests_test.go @@ -0,0 +1,402 @@ +package testing + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const tokenID = "blerb" + +func TestListFlavors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "flavors": [ + { + "id": "1", + "name": "m1.tiny", + "vcpus": 1, + "disk": 1, + "ram": 512, + "swap":"", + "os-flavor-access:is_public": true, + "OS-FLV-EXT-DATA:ephemeral": 10 + }, + { + "id": "2", + "name": "m1.small", 
+ "vcpus": 1, + "disk": 20, + "ram": 2048, + "swap": 1000, + "os-flavor-access:is_public": true, + "OS-FLV-EXT-DATA:ephemeral": 0 + }, + { + "id": "3", + "name": "m1.medium", + "vcpus": 2, + "disk": 40, + "ram": 4096, + "swap": 1000, + "os-flavor-access:is_public": false, + "OS-FLV-EXT-DATA:ephemeral": 0 + } + ], + "flavors_links": [ + { + "href": "%s/flavors/detail?marker=2", + "rel": "next" + } + ] + } + `, th.Server.URL) + case "2": + fmt.Fprintf(w, `{ "flavors": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) + + pages := 0 + // Get public and private flavors + err := flavors.ListDetail(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := flavors.ExtractFlavors(page) + if err != nil { + return false, err + } + + expected := []flavors.Flavor{ + {ID: "1", Name: "m1.tiny", VCPUs: 1, Disk: 1, RAM: 512, Swap: 0, IsPublic: true, Ephemeral: 10}, + {ID: "2", Name: "m1.small", VCPUs: 1, Disk: 20, RAM: 2048, Swap: 1000, IsPublic: true, Ephemeral: 0}, + {ID: "3", Name: "m1.medium", VCPUs: 2, Disk: 40, RAM: 4096, Swap: 1000, IsPublic: false, Ephemeral: 0}, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } + + return true, nil + }) + if err != nil { + t.Fatal(err) + } + if pages != 1 { + t.Errorf("Expected one page, got %d", pages) + } +} + +func TestGetFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/12345", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "flavor": { + "id": "1", + "name": "m1.tiny", + "disk": 1, + "ram": 512, + "vcpus": 1, + "rxtx_factor": 1, + "swap": "" + } + } + `) + }) + + actual, err := flavors.Get(fake.ServiceClient(), "12345").Extract() + if err != nil { + t.Fatalf("Unable to get flavor: %v", err) + } + + expected := &flavors.Flavor{ + ID: "1", + Name: "m1.tiny", + Disk: 1, + RAM: 512, + VCPUs: 1, + RxTxFactor: 1, + Swap: 0, + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } +} + +func TestCreateFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "flavor": { + "id": "1", + "name": "m1.tiny", + "disk": 1, + "ram": 512, + "vcpus": 1, + "rxtx_factor": 1, + "swap": "" + } + } + `) + }) + + disk := 1 + opts := &flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: &disk, + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + actual, err := flavors.Create(fake.ServiceClient(), opts).Extract() + if err != nil { + t.Fatalf("Unable to create flavor: %v", err) + } + + expected := &flavors.Flavor{ + ID: "1", + Name: "m1.tiny", + Disk: 1, + RAM: 512, + VCPUs: 1, + RxTxFactor: 1, + Swap: 0, + } + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } +} + +func TestDeleteFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/12345678", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) + + res := 
flavors.Delete(fake.ServiceClient(), "12345678") + th.AssertNoErr(t, res.Err) +} + +func TestFlavorAccessesList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/12345678/os-flavor-access", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "flavor_access": [ + { + "flavor_id": "12345678", + "tenant_id": "2f954bcf047c4ee9b09a37d49ae6db54" + } + ] + } + `) + }) + + expected := []flavors.FlavorAccess{ + flavors.FlavorAccess{ + FlavorID: "12345678", + TenantID: "2f954bcf047c4ee9b09a37d49ae6db54", + }, + } + + allPages, err := flavors.ListAccesses(fake.ServiceClient(), "12345678").AllPages() + th.AssertNoErr(t, err) + + actual, err := flavors.ExtractAccesses(allPages) + th.AssertNoErr(t, err) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } +} + +func TestFlavorAccessAdd(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/12345678/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "addTenantAccess": { + "tenant": "2f954bcf047c4ee9b09a37d49ae6db54" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "flavor_access": [ + { + "flavor_id": "12345678", + "tenant_id": "2f954bcf047c4ee9b09a37d49ae6db54" + } + ] + } + `) + }) + + expected := []flavors.FlavorAccess{ + flavors.FlavorAccess{ + FlavorID: "12345678", + TenantID: "2f954bcf047c4ee9b09a37d49ae6db54", + }, + } + + addAccessOpts := flavors.AddAccessOpts{ + Tenant: "2f954bcf047c4ee9b09a37d49ae6db54", + } + + actual, err := flavors.AddAccess(fake.ServiceClient(), "12345678", addAccessOpts).Extract() + th.AssertNoErr(t, err) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } +} + +func TestFlavorAccessRemove(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/flavors/12345678/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "removeTenantAccess": { + "tenant": "2f954bcf047c4ee9b09a37d49ae6db54" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "flavor_access": [] + } + `) + }) + + expected := []flavors.FlavorAccess{} + removeAccessOpts := flavors.RemoveAccessOpts{ + Tenant: "2f954bcf047c4ee9b09a37d49ae6db54", + } + + actual, err := flavors.RemoveAccess(fake.ServiceClient(), "12345678", removeAccessOpts).Extract() + th.AssertNoErr(t, err) + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } +} + +func TestFlavorExtraSpecsList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleExtraSpecsListSuccessfully(t) + + expected := ExtraSpecs + actual, err := flavors.ListExtraSpecs(fake.ServiceClient(), "1").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestFlavorExtraSpecGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleExtraSpecGetSuccessfully(t) + + expected := 
ExtraSpec + actual, err := flavors.GetExtraSpec(fake.ServiceClient(), "1", "hw:cpu_policy").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestFlavorExtraSpecsCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleExtraSpecsCreateSuccessfully(t) + + createOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_policy": "CPU-POLICY", + "hw:cpu_thread_policy": "CPU-THREAD-POLICY", + } + expected := ExtraSpecs + actual, err := flavors.CreateExtraSpecs(fake.ServiceClient(), "1", createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestFlavorExtraSpecUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleExtraSpecUpdateSuccessfully(t) + + updateOpts := flavors.ExtraSpecsOpts{ + "hw:cpu_policy": "CPU-POLICY-2", + } + expected := UpdatedExtraSpec + actual, err := flavors.UpdateExtraSpec(fake.ServiceClient(), "1", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestFlavorExtraSpecDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleExtraSpecDeleteSuccessfully(t) + + res := flavors.DeleteExtraSpec(fake.ServiceClient(), "1", "hw:cpu_policy") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go new file mode 100644 index 000000000000..8620dd78ad0a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/flavors/urls.go @@ -0,0 +1,49 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" +) + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors", "detail") +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func accessURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-flavor-access") +} + +func accessActionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "action") +} + +func extraSpecsListURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-extra_specs") +} + +func extraSpecsGetURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} + +func extraSpecsCreateURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id, "os-extra_specs") +} + +func extraSpecUpdateURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} + +func extraSpecDeleteURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("flavors", id, "os-extra_specs", key) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go new file mode 100644 index 000000000000..22410a79a27c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/doc.go @@ -0,0 +1,32 @@ +/* +Package images provides information and interaction with the 
images through +the OpenStack Compute service. + +This API is deprecated and will be removed from a future version of the Nova +API service. + +An image is a collection of files used to create or rebuild a server. +Operators provide a number of pre-built OS images by default. You may also +create custom images from cloud servers you have launched. + +Example to List Images + + listOpts := images.ListOpts{ + Limit: 2, + } + + allPages, err := images.ListDetail(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allImages, err := images.ExtractImages(allPages) + if err != nil { + panic(err) + } + + for _, image := range allImages { + fmt.Printf("%+v\n", image) + } +*/ +package images diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go new file mode 100644 index 000000000000..558b481b9e7d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/requests.go @@ -0,0 +1,109 @@ +package images + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// ListDetail request. +type ListOptsBuilder interface { + ToImageListQuery() (string, error) +} + +// ListOpts contain options filtering Images returned from a call to ListDetail. +type ListOpts struct { + // ChangesSince filters Images based on the last changed status (in date-time + // format). + ChangesSince string `q:"changes-since"` + + // Limit limits the number of Images to return. + Limit int `q:"limit"` + + // Mark is an Image UUID at which to set a marker. + Marker string `q:"marker"` + + // Name is the name of the Image. + Name string `q:"name"` + + // Server is the name of the Server (in URL format). + Server string `q:"server"` + + // Status is the current status of the Image. + Status string `q:"status"` + + // Type is the type of image (e.g. BASE, SERVER, ALL). + Type string `q:"type"` +} + +// ToImageListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToImageListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail enumerates the available images. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToImageListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ImagePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns data about a specific image by its ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// Delete deletes the specified image ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// IDFromName is a convienience function that returns an image's ID given its +// name. 
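+//
+// It pages through a full ListDetail call and matches on the Name field, so it
+// returns an error when zero or more than one image carries that name. A short
+// sketch (computeClient is assumed to be an initialized compute v2 ServiceClient):
+//
+//	imageID, err := images.IDFromName(computeClient, "cirros-0.3.2-x86_64-disk")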
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + allPages, err := ListDetail(client, nil).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractImages(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + err := &gophercloud.ErrResourceNotFound{} + err.ResourceType = "image" + err.Name = name + return "", err + case 1: + return id, nil + default: + err := &gophercloud.ErrMultipleResourcesFound{} + err.ResourceType = "image" + err.Name = name + err.Count = count + return "", err + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go new file mode 100644 index 000000000000..70d1018c7217 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/results.go @@ -0,0 +1,95 @@ +package images + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// GetResult is the response from a Get operation. Call its Extract method to +// interpret it as an Image. +type GetResult struct { + gophercloud.Result +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Extract interprets a GetResult as an Image. +func (r GetResult) Extract() (*Image, error) { + var s struct { + Image *Image `json:"image"` + } + err := r.ExtractInto(&s) + return s.Image, err +} + +// Image represents an Image returned by the Compute API. +type Image struct { + // ID is the unique ID of an image. + ID string + + // Created is the date when the image was created. + Created string + + // MinDisk is the minimum amount of disk a flavor must have to be able + // to create a server based on the image, measured in GB. + MinDisk int + + // MinRAM is the minimum amount of RAM a flavor must have to be able + // to create a server based on the image, measured in MB. + MinRAM int + + // Name provides a human-readable moniker for the OS image. + Name string + + // The Progress and Status fields indicate image-creation status. + Progress int + + // Status is the current status of the image. + Status string + + // Update is the date when the image was updated. + Updated string + + // Metadata provides free-form key/value pairs that further describe the + // image. + Metadata map[string]interface{} +} + +// ImagePage contains a single page of all Images returne from a ListDetail +// operation. Use ExtractImages to convert it into a slice of usable structs. +type ImagePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if an ImagePage contains no Image results. +func (page ImagePage) IsEmpty() (bool, error) { + images, err := ExtractImages(page) + return len(images) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (page ImagePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"images_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractImages converts a page of List results into a slice of usable Image +// structs. 
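+//
+// A short sketch of the usual pattern (computeClient is assumed to be an
+// initialized compute v2 ServiceClient):
+//
+//	allPages, err := images.ListDetail(computeClient, nil).AllPages()
+//	if err != nil {
+//		// handle the listing error
+//	}
+//	allImages, err := images.ExtractImages(allPages)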
+func ExtractImages(r pagination.Page) ([]Image, error) { + var s struct { + Images []Image `json:"images"` + } + err := (r.(ImagePage)).ExtractInto(&s) + return s.Images, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/testing/doc.go new file mode 100644 index 000000000000..db10451530e2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/testing/doc.go @@ -0,0 +1,2 @@ +// images unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/testing/requests_test.go new file mode 100644 index 000000000000..1de030352e40 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/testing/requests_test.go @@ -0,0 +1,225 @@ +package testing + +import ( + "encoding/json" + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListImages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "images": [ + { + "status": "ACTIVE", + "updated": "2014-09-23T12:54:56Z", + "id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + "OS-EXT-IMG-SIZE:size": 476704768, + "name": "F17-x86_64-cfntools", + "created": "2014-09-23T12:54:52Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "metadata": { + "architecture": "x86_64", + "block_device_mapping": { + "guest_format": null, + "boot_index": 0, + "device_name": "/dev/vda", + "delete_on_termination": false + } + } + }, + { + "status": "ACTIVE", + "updated": "2014-09-23T12:51:43Z", + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "OS-EXT-IMG-SIZE:size": 13167616, + "name": "cirros-0.3.2-x86_64-disk", + "created": "2014-09-23T12:51:42Z", + "minDisk": 0, + "progress": 100, + "minRam": 0 + } + ] + } + `) + case "2": + fmt.Fprintf(w, `{ "images": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) + + pages := 0 + options := &images.ListOpts{Limit: 2} + err := images.ListDetail(fake.ServiceClient(), options).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := images.ExtractImages(page) + if err != nil { + return false, err + } + + expected := []images.Image{ + { + ID: "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + Name: "F17-x86_64-cfntools", + Created: "2014-09-23T12:54:52Z", + Updated: "2014-09-23T12:54:56Z", + MinDisk: 0, + MinRAM: 0, + Progress: 100, + Status: "ACTIVE", + Metadata: map[string]interface{}{ + "architecture": "x86_64", + "block_device_mapping": map[string]interface{}{ + "guest_format": interface{}(nil), + "boot_index": float64(0), + "device_name": "/dev/vda", + "delete_on_termination": false, + }, + }, + }, + { + ID: "f90f6034-2570-4974-8351-6b49732ef2eb", + Name: "cirros-0.3.2-x86_64-disk", + Created: "2014-09-23T12:51:42Z", + Updated: "2014-09-23T12:51:43Z", + MinDisk: 0, + MinRAM: 0, + Progress: 100, + 
Status: "ACTIVE", + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Unexpected page contents: expected %#v, got %#v", expected, actual) + } + + return false, nil + }) + + if err != nil { + t.Fatalf("EachPage error: %v", err) + } + if pages != 1 { + t.Errorf("Expected one page, got %d", pages) + } +} + +func TestGetImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/12345678", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "image": { + "status": "ACTIVE", + "updated": "2014-09-23T12:54:56Z", + "id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + "OS-EXT-IMG-SIZE:size": 476704768, + "name": "F17-x86_64-cfntools", + "created": "2014-09-23T12:54:52Z", + "minDisk": 0, + "progress": 100, + "minRam": 0, + "metadata": { + "architecture": "x86_64", + "block_device_mapping": { + "guest_format": null, + "boot_index": 0, + "device_name": "/dev/vda", + "delete_on_termination": false + } + } + } + } + `) + }) + + actual, err := images.Get(fake.ServiceClient(), "12345678").Extract() + if err != nil { + t.Fatalf("Unexpected error from Get: %v", err) + } + + expected := &images.Image{ + Status: "ACTIVE", + Updated: "2014-09-23T12:54:56Z", + ID: "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + Name: "F17-x86_64-cfntools", + Created: "2014-09-23T12:54:52Z", + MinDisk: 0, + Progress: 100, + MinRAM: 0, + Metadata: map[string]interface{}{ + "architecture": "x86_64", + "block_device_mapping": map[string]interface{}{ + "guest_format": interface{}(nil), + "boot_index": float64(0), + "device_name": "/dev/vda", + "delete_on_termination": false, + }, + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but got %#v", expected, actual) + } +} + +func TestNextPageURL(t *testing.T) { + var page images.ImagePage + var body map[string]interface{} + bodyString := []byte(`{"images":{"links":[{"href":"http://192.154.23.87/12345/images/image3","rel":"bookmark"}]}, "images_links":[{"href":"http://192.154.23.87/12345/images/image4","rel":"next"}]}`) + err := json.Unmarshal(bodyString, &body) + if err != nil { + t.Fatalf("Error unmarshaling data into page body: %v", err) + } + page.Body = body + + expected := "http://192.154.23.87/12345/images/image4" + actual, err := page.NextPageURL() + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) +} + +// Test Image delete +func TestDeleteImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/images/12345678", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + + res := images.Delete(fake.ServiceClient(), "12345678") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go new file mode 100644 index 000000000000..57787fb725ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/images/urls.go @@ -0,0 +1,15 @@ +package images + +import "github.com/gophercloud/gophercloud" + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("images", "detail") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} + +func 
deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("images", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go new file mode 100644 index 000000000000..3b0ab783626a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/doc.go @@ -0,0 +1,115 @@ +/* +Package servers provides information and interaction with the server API +resource in the OpenStack Compute service. + +A server is a virtual machine instance in the compute system. In order for +one to be provisioned, a valid flavor and image are required. + +Example to List Servers + + listOpts := servers.ListOpts{ + AllTenants: true, + } + + allPages, err := servers.List(computeClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allServers, err := servers.ExtractServers(allPages) + if err != nil { + panic(err) + } + + for _, server := range allServers { + fmt.Printf("%+v\n", server) + } + +Example to Create a Server + + createOpts := servers.CreateOpts{ + Name: "server_name", + ImageRef: "image-uuid", + FlavorRef: "flavor-uuid", + } + + server, err := servers.Create(computeClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Server + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + err := servers.Delete(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Force Delete a Server + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + err := servers.ForceDelete(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Reboot a Server + + rebootOpts := servers.RebootOpts{ + Type: servers.SoftReboot, + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + err := servers.Reboot(computeClient, serverID, rebootOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to Rebuild a Server + + rebuildOpts := servers.RebuildOpts{ + Name: "new_name", + ImageID: "image-uuid", + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + server, err := servers.Rebuilt(computeClient, serverID, rebuildOpts).Extract() + if err != nil { + panic(err) + } + +Example to Resize a Server + + resizeOpts := servers.ResizeOpts{ + FlavorRef: "flavor-uuid", + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + err := servers.Resize(computeClient, serverID, resizeOpts).ExtractErr() + if err != nil { + panic(err) + } + + err = servers.ConfirmResize(computeClient, serverID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Snapshot a Server + + snapshotOpts := servers.CreateImageOpts{ + Name: "snapshot_name", + } + + serverID := "d9072956-1560-487c-97f2-18bdf65ec749" + + image, err := servers.CreateImage(computeClient, serverID, snapshotOpts).ExtractImageID() + if err != nil { + panic(err) + } +*/ +package servers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go new file mode 100644 index 000000000000..c9f0e3c20b5d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/errors.go @@ -0,0 +1,71 @@ +package servers + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" +) + +// ErrNeitherImageIDNorImageNameProvided is the error when neither the image +// ID nor the image name is provided for a server operation +type ErrNeitherImageIDNorImageNameProvided 
struct{ gophercloud.ErrMissingInput } + +func (e ErrNeitherImageIDNorImageNameProvided) Error() string { + return "One and only one of the image ID and the image name must be provided." +} + +// ErrNeitherFlavorIDNorFlavorNameProvided is the error when neither the flavor +// ID nor the flavor name is provided for a server operation +type ErrNeitherFlavorIDNorFlavorNameProvided struct{ gophercloud.ErrMissingInput } + +func (e ErrNeitherFlavorIDNorFlavorNameProvided) Error() string { + return "One and only one of the flavor ID and the flavor name must be provided." +} + +type ErrNoClientProvidedForIDByName struct{ gophercloud.ErrMissingInput } + +func (e ErrNoClientProvidedForIDByName) Error() string { + return "A service client must be provided to find a resource ID by name." +} + +// ErrInvalidHowParameterProvided is the error when an unknown value is given +// for the `how` argument +type ErrInvalidHowParameterProvided struct{ gophercloud.ErrInvalidInput } + +// ErrNoAdminPassProvided is the error when an administrative password isn't +// provided for a server operation +type ErrNoAdminPassProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoImageIDProvided is the error when an image ID isn't provided for a server +// operation +type ErrNoImageIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrNoIDProvided is the error when a server ID isn't provided for a server +// operation +type ErrNoIDProvided struct{ gophercloud.ErrMissingInput } + +// ErrServer is a generic error type for servers HTTP operations. +type ErrServer struct { + gophercloud.ErrUnexpectedResponseCode + ID string +} + +func (se ErrServer) Error() string { + return fmt.Sprintf("Error while executing HTTP request for server [%s]", se.ID) +} + +// Error404 overrides the generic 404 error message. +func (se ErrServer) Error404(e gophercloud.ErrUnexpectedResponseCode) error { + se.ErrUnexpectedResponseCode = e + return &ErrServerNotFound{se} +} + +// ErrServerNotFound is the error when a 404 is received during server HTTP +// operations. +type ErrServerNotFound struct { + ErrServer +} + +func (e ErrServerNotFound) Error() string { + return fmt.Sprintf("I couldn't find server [%s]", e.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go new file mode 100644 index 000000000000..a6530f8d6515 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/requests.go @@ -0,0 +1,794 @@ +package servers + +import ( + "encoding/base64" + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/flavors" + "github.com/gophercloud/gophercloud/openstack/compute/v2/images" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToServerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +type ListOpts struct { + // ChangesSince is a time/date stamp for when the server last changed status. + ChangesSince string `q:"changes-since"` + + // Image is the name of the image in URL format. 
+ Image string `q:"image"` + + // Flavor is the name of the flavor in URL format. + Flavor string `q:"flavor"` + + // Name of the server as a string; can be queried with regular expressions. + // Realize that ?name=bob returns both bob and bobb. If you need to match bob + // only, you can use a regular expression matching the syntax of the + // underlying database server implemented for Compute. + Name string `q:"name"` + + // Status is the value of the status of the server so that you can filter on + // "ACTIVE" for example. + Status string `q:"status"` + + // Host is the name of the host as a string. + Host string `q:"host"` + + // Marker is a UUID of the server at which you want to set a marker. + Marker string `q:"marker"` + + // Limit is an integer value for the limit of values to return. + Limit int `q:"limit"` + + // AllTenants is a bool to show all tenants. + AllTenants bool `q:"all_tenants"` + + // TenantID lists servers for a particular tenant. + // Setting "AllTenants = true" is required. + TenantID string `q:"tenant_id"` +} + +// ToServerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToServerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list servers accessible to you. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToServerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToServerCreateMap() (map[string]interface{}, error) +} + +// Network is used within CreateOpts to control a new server's network +// attachments. +type Network struct { + // UUID of a network to attach to the newly provisioned server. + // Required unless Port is provided. + UUID string + + // Port of a neutron network to attach to the newly provisioned server. + // Required unless UUID is provided. + Port string + + // FixedIP specifies a fixed IPv4 address to be used on this network. + FixedIP string +} + +// Personality is an array of files that are injected into the server at launch. +type Personality []*File + +// File is used within CreateOpts and RebuildOpts to inject a file into the +// server at launch. +// File implements the json.Marshaler interface, so when a Create or Rebuild +// operation is requested, json.Marshal will call File's MarshalJSON method. +type File struct { + // Path of the file. + Path string + + // Contents of the file. Maximum content size is 255 bytes. + Contents []byte +} + +// MarshalJSON marshals the escaped file, base64 encoding the contents. +func (f *File) MarshalJSON() ([]byte, error) { + file := struct { + Path string `json:"path"` + Contents string `json:"contents"` + }{ + Path: f.Path, + Contents: base64.StdEncoding.EncodeToString(f.Contents), + } + return json.Marshal(file) +} + +// CreateOpts specifies server creation parameters. +type CreateOpts struct { + // Name is the name to assign to the newly launched server. 
+	Name string `json:"name" required:"true"`
+
+	// ImageRef [optional; required if ImageName is not provided] is the ID or
+	// full URL to the image that contains the server's OS and initial state.
+	// Also optional if using the boot-from-volume extension.
+	ImageRef string `json:"imageRef"`
+
+	// ImageName [optional; required if ImageRef is not provided] is the name of
+	// the image that contains the server's OS and initial state.
+	// Also optional if using the boot-from-volume extension.
+	ImageName string `json:"-"`
+
+	// FlavorRef [optional; required if FlavorName is not provided] is the ID or
+	// full URL to the flavor that describes the server's specs.
+	FlavorRef string `json:"flavorRef"`
+
+	// FlavorName [optional; required if FlavorRef is not provided] is the name of
+	// the flavor that describes the server's specs.
+	FlavorName string `json:"-"`
+
+	// SecurityGroups lists the names of the security groups to which this server
+	// should belong.
+	SecurityGroups []string `json:"-"`
+
+	// UserData contains configuration information or scripts to use upon launch.
+	// Create will base64-encode it for you, if it isn't already.
+	UserData []byte `json:"-"`
+
+	// AvailabilityZone in which to launch the server.
+	AvailabilityZone string `json:"availability_zone,omitempty"`
+
+	// Networks dictates how this server will be attached to available networks.
+	// By default, the server will be attached to all isolated networks for the
+	// tenant.
+	Networks []Network `json:"-"`
+
+	// Metadata contains key-value pairs (up to 255 bytes each) to attach to the
+	// server.
+	Metadata map[string]string `json:"metadata,omitempty"`
+
+	// Personality includes files to inject into the server at launch.
+	// Create will base64-encode file contents for you.
+	Personality Personality `json:"personality,omitempty"`
+
+	// ConfigDrive enables metadata injection through a configuration drive.
+	ConfigDrive *bool `json:"config_drive,omitempty"`
+
+	// AdminPass sets the root user password. If not set, a randomly-generated
+	// password will be created and returned in the response.
+	AdminPass string `json:"adminPass,omitempty"`
+
+	// AccessIPv4 specifies an IPv4 address for the instance.
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 specifies an IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+
+	// ServiceClient will allow calls to be made to retrieve an image or
+	// flavor ID by name.
+	ServiceClient *gophercloud.ServiceClient `json:"-"`
+}
+
+// ToServerCreateMap assembles a request body based on the contents of a
+// CreateOpts.
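//
// An illustrative sketch of building the request body (the option values
// below are placeholders, not taken from this package):
//
//	opts := servers.CreateOpts{
//		Name:      "server_name",
//		ImageRef:  "image-uuid",
//		FlavorRef: "flavor-uuid",
//	}
//	b, err := opts.ToServerCreateMap()
//	// On success, b["server"] is a map[string]interface{} holding the request
//	// fields, e.g. b["server"].(map[string]interface{})["name"].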
+func (opts CreateOpts) ToServerCreateMap() (map[string]interface{}, error) { + sc := opts.ServiceClient + opts.ServiceClient = nil + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.UserData != nil { + var userData string + if _, err := base64.StdEncoding.DecodeString(string(opts.UserData)); err != nil { + userData = base64.StdEncoding.EncodeToString(opts.UserData) + } else { + userData = string(opts.UserData) + } + b["user_data"] = &userData + } + + if len(opts.SecurityGroups) > 0 { + securityGroups := make([]map[string]interface{}, len(opts.SecurityGroups)) + for i, groupName := range opts.SecurityGroups { + securityGroups[i] = map[string]interface{}{"name": groupName} + } + b["security_groups"] = securityGroups + } + + if len(opts.Networks) > 0 { + networks := make([]map[string]interface{}, len(opts.Networks)) + for i, net := range opts.Networks { + networks[i] = make(map[string]interface{}) + if net.UUID != "" { + networks[i]["uuid"] = net.UUID + } + if net.Port != "" { + networks[i]["port"] = net.Port + } + if net.FixedIP != "" { + networks[i]["fixed_ip"] = net.FixedIP + } + } + b["networks"] = networks + } + + // If ImageRef isn't provided, check if ImageName was provided to ascertain + // the image ID. + if opts.ImageRef == "" { + if opts.ImageName != "" { + if sc == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + imageID, err := images.IDFromName(sc, opts.ImageName) + if err != nil { + return nil, err + } + b["imageRef"] = imageID + } + } + + // If FlavorRef isn't provided, use FlavorName to ascertain the flavor ID. + if opts.FlavorRef == "" { + if opts.FlavorName == "" { + err := ErrNeitherFlavorIDNorFlavorNameProvided{} + err.Argument = "FlavorRef/FlavorName" + return nil, err + } + if sc == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + flavorID, err := flavors.IDFromName(sc, opts.FlavorName) + if err != nil { + return nil, err + } + b["flavorRef"] = flavorID + } + + return map[string]interface{}{"server": b}, nil +} + +// Create requests a server to be provisioned to the user in the current tenant. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + reqBody, err := opts.ToServerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(listURL(client), reqBody, &r.Body, nil) + return +} + +// Delete requests that a server previously provisioned be removed from your +// account. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ForceDelete forces the deletion of a server. +func ForceDelete(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"forceDelete": ""}, nil, nil) + return +} + +// Get requests details on a single server, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. +type UpdateOptsBuilder interface { + ToServerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an existing +// server. 
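//
// A minimal usage sketch (computeClient and the server ID are assumed
// placeholders):
//
//	updateOpts := servers.UpdateOpts{
//		Name: "new_name",
//	}
//	server, err := servers.Update(computeClient, "server-uuid", updateOpts).Extract()
//	if err != nil {
//		panic(err)
//	}
//	_ = server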
+type UpdateOpts struct {
+	// Name changes the displayed name of the server.
+	// The server host name will *not* change.
+	// Server names are not constrained to be unique, even within the same tenant.
+	Name string `json:"name,omitempty"`
+
+	// AccessIPv4 provides a new IPv4 address for the instance.
+	AccessIPv4 string `json:"accessIPv4,omitempty"`
+
+	// AccessIPv6 provides a new IPv6 address for the instance.
+	AccessIPv6 string `json:"accessIPv6,omitempty"`
+}
+
+// ToServerUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToServerUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "server")
+}
+
+// Update requests that various attributes of the indicated server be changed.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToServerUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ChangeAdminPassword alters the administrator or root password for a specified
+// server.
+func ChangeAdminPassword(client *gophercloud.ServiceClient, id, newPassword string) (r ActionResult) {
+	b := map[string]interface{}{
+		"changePassword": map[string]string{
+			"adminPass": newPassword,
+		},
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
+
+// RebootMethod describes the mechanisms by which a server reboot can be requested.
+type RebootMethod string
+
+// These constants determine how a server should be rebooted.
+// See the Reboot() function for further details.
+const (
+	SoftReboot RebootMethod = "SOFT"
+	HardReboot RebootMethod = "HARD"
+	OSReboot   = SoftReboot
+	PowerCycle = HardReboot
+)
+
+// RebootOptsBuilder allows extensions to add additional parameters to the
+// reboot request.
+type RebootOptsBuilder interface {
+	ToServerRebootMap() (map[string]interface{}, error)
+}
+
+// RebootOpts provides options to the reboot request.
+type RebootOpts struct {
+	// Type is the type of reboot to perform on the server.
+	Type RebootMethod `json:"type" required:"true"`
+}
+
+// ToServerRebootMap builds a body for the reboot request.
+func (opts RebootOpts) ToServerRebootMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "reboot")
+}
+
+/*
+	Reboot requests that a given server reboot.
+
+	Two methods exist for rebooting a server:
+
+	HardReboot (aka PowerCycle) starts the server instance by physically cutting
+	power to the machine, or if a VM, terminating it at the hypervisor level.
+	It's done. Caput. Full stop.
+	Then, after a brief while, power is restored or the VM instance restarted.
+
+	SoftReboot (aka OSReboot) simply tells the OS to restart under its own
+	procedure.
+	E.g., in Linux, asking it to enter runlevel 6, or executing
+	"sudo shutdown -r now", or by asking Windows to restart the machine.
+*/
+func Reboot(client *gophercloud.ServiceClient, id string, opts RebootOptsBuilder) (r ActionResult) {
+	b, err := opts.ToServerRebootMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(actionURL(client, id), b, nil, nil)
+	return
+}
+
+// RebuildOptsBuilder allows extensions to provide additional parameters to the
+// rebuild request.
+type RebuildOptsBuilder interface {
+	ToServerRebuildMap() (map[string]interface{}, error)
+}
+
+// RebuildOpts represents the configuration options used in a server rebuild
+// operation.
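//
// An illustrative sketch of a rebuild by image name rather than image ID
// (computeClient and the names are assumed placeholders; ServiceClient must be
// set so the image ID can be looked up):
//
//	rebuildOpts := servers.RebuildOpts{
//		Name:          "new_name",
//		ImageName:     "my-image",
//		ServiceClient: computeClient,
//	}
//	server, err := servers.Rebuild(computeClient, "server-uuid", rebuildOpts).Extract()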
+type RebuildOpts struct { + // AdminPass is the server's admin password + AdminPass string `json:"adminPass,omitempty"` + + // ImageID is the ID of the image you want your server to be provisioned on. + ImageID string `json:"imageRef"` + + // ImageName is readable name of an image. + ImageName string `json:"-"` + + // Name to set the server to + Name string `json:"name,omitempty"` + + // AccessIPv4 [optional] provides a new IPv4 address for the instance. + AccessIPv4 string `json:"accessIPv4,omitempty"` + + // AccessIPv6 [optional] provides a new IPv6 address for the instance. + AccessIPv6 string `json:"accessIPv6,omitempty"` + + // Metadata [optional] contains key-value pairs (up to 255 bytes each) + // to attach to the server. + Metadata map[string]string `json:"metadata,omitempty"` + + // Personality [optional] includes files to inject into the server at launch. + // Rebuild will base64-encode file contents for you. + Personality Personality `json:"personality,omitempty"` + + // ServiceClient will allow calls to be made to retrieve an image or + // flavor ID by name. + ServiceClient *gophercloud.ServiceClient `json:"-"` +} + +// ToServerRebuildMap formats a RebuildOpts struct into a map for use in JSON +func (opts RebuildOpts) ToServerRebuildMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + // If ImageRef isn't provided, check if ImageName was provided to ascertain + // the image ID. + if opts.ImageID == "" { + if opts.ImageName != "" { + if opts.ServiceClient == nil { + err := ErrNoClientProvidedForIDByName{} + err.Argument = "ServiceClient" + return nil, err + } + imageID, err := images.IDFromName(opts.ServiceClient, opts.ImageName) + if err != nil { + return nil, err + } + b["imageRef"] = imageID + } + } + + return map[string]interface{}{"rebuild": b}, nil +} + +// Rebuild will reprovision the server according to the configuration options +// provided in the RebuildOpts struct. +func Rebuild(client *gophercloud.ServiceClient, id string, opts RebuildOptsBuilder) (r RebuildResult) { + b, err := opts.ToServerRebuildMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, nil) + return +} + +// ResizeOptsBuilder allows extensions to add additional parameters to the +// resize request. +type ResizeOptsBuilder interface { + ToServerResizeMap() (map[string]interface{}, error) +} + +// ResizeOpts represents the configuration options used to control a Resize +// operation. +type ResizeOpts struct { + // FlavorRef is the ID of the flavor you wish your server to become. + FlavorRef string `json:"flavorRef" required:"true"` +} + +// ToServerResizeMap formats a ResizeOpts as a map that can be used as a JSON +// request body for the Resize request. +func (opts ResizeOpts) ToServerResizeMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "resize") +} + +// Resize instructs the provider to change the flavor of the server. +// +// Note that this implies rebuilding it. +// +// Unfortunately, one cannot pass rebuild parameters to the resize function. +// When the resize completes, the server will be in VERIFY_RESIZE state. +// While in this state, you can explore the use of the new server's +// configuration. If you like it, call ConfirmResize() to commit the resize +// permanently. Otherwise, call RevertResize() to restore the old configuration. 
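//
// A minimal sketch of the resize/confirm flow described above (computeClient
// and the IDs are assumed placeholders):
//
//	resizeOpts := servers.ResizeOpts{FlavorRef: "flavor-uuid"}
//	err := servers.Resize(computeClient, "server-uuid", resizeOpts).ExtractErr()
//	if err != nil {
//		panic(err)
//	}
//	// Once the server reaches VERIFY_RESIZE, either commit the change:
//	err = servers.ConfirmResize(computeClient, "server-uuid").ExtractErr()
//	// or roll it back:
//	// err = servers.RevertResize(computeClient, "server-uuid").ExtractErr()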
+func Resize(client *gophercloud.ServiceClient, id string, opts ResizeOptsBuilder) (r ActionResult) { + b, err := opts.ToServerResizeMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, nil, nil) + return +} + +// ConfirmResize confirms a previous resize operation on a server. +// See Resize() for more details. +func ConfirmResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"confirmResize": nil}, nil, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202, 204}, + }) + return +} + +// RevertResize cancels a previous resize operation on a server. +// See Resize() for more details. +func RevertResize(client *gophercloud.ServiceClient, id string) (r ActionResult) { + _, r.Err = client.Post(actionURL(client, id), map[string]interface{}{"revertResize": nil}, nil, nil) + return +} + +// ResetMetadataOptsBuilder allows extensions to add additional parameters to +// the Reset request. +type ResetMetadataOptsBuilder interface { + ToMetadataResetMap() (map[string]interface{}, error) +} + +// MetadataOpts is a map that contains key-value pairs. +type MetadataOpts map[string]string + +// ToMetadataResetMap assembles a body for a Reset request based on the contents +// of a MetadataOpts. +func (opts MetadataOpts) ToMetadataResetMap() (map[string]interface{}, error) { + return map[string]interface{}{"metadata": opts}, nil +} + +// ToMetadataUpdateMap assembles a body for an Update request based on the +// contents of a MetadataOpts. +func (opts MetadataOpts) ToMetadataUpdateMap() (map[string]interface{}, error) { + return map[string]interface{}{"metadata": opts}, nil +} + +// ResetMetadata will create multiple new key-value pairs for the given server +// ID. +// Note: Using this operation will erase any already-existing metadata and +// create the new metadata provided. To keep any already-existing metadata, +// use the UpdateMetadatas or UpdateMetadata function. +func ResetMetadata(client *gophercloud.ServiceClient, id string, opts ResetMetadataOptsBuilder) (r ResetMetadataResult) { + b, err := opts.ToMetadataResetMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Metadata requests all the metadata for the given server ID. +func Metadata(client *gophercloud.ServiceClient, id string) (r GetMetadataResult) { + _, r.Err = client.Get(metadataURL(client, id), &r.Body, nil) + return +} + +// UpdateMetadataOptsBuilder allows extensions to add additional parameters to +// the Create request. +type UpdateMetadataOptsBuilder interface { + ToMetadataUpdateMap() (map[string]interface{}, error) +} + +// UpdateMetadata updates (or creates) all the metadata specified by opts for +// the given server ID. This operation does not affect already-existing metadata +// that is not specified by opts. +func UpdateMetadata(client *gophercloud.ServiceClient, id string, opts UpdateMetadataOptsBuilder) (r UpdateMetadataResult) { + b, err := opts.ToMetadataUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(metadataURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// MetadatumOptsBuilder allows extensions to add additional parameters to the +// Create request. 
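//
// For illustration, a sketch using the single-pair MetadatumOpts type defined
// below (computeClient and the server ID are assumed placeholders):
//
//	created, err := servers.CreateMetadatum(computeClient, "server-uuid",
//		servers.MetadatumOpts{"environment": "staging"}).Extract()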
+type MetadatumOptsBuilder interface { + ToMetadatumCreateMap() (map[string]interface{}, string, error) +} + +// MetadatumOpts is a map of length one that contains a key-value pair. +type MetadatumOpts map[string]string + +// ToMetadatumCreateMap assembles a body for a Create request based on the +// contents of a MetadataumOpts. +func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, string, error) { + if len(opts) != 1 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "servers.MetadatumOpts" + err.Info = "Must have 1 and only 1 key-value pair" + return nil, "", err + } + metadatum := map[string]interface{}{"meta": opts} + var key string + for k := range metadatum["meta"].(MetadatumOpts) { + key = k + } + return metadatum, key, nil +} + +// CreateMetadatum will create or update the key-value pair with the given key +// for the given server ID. +func CreateMetadatum(client *gophercloud.ServiceClient, id string, opts MetadatumOptsBuilder) (r CreateMetadatumResult) { + b, key, err := opts.ToMetadatumCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(metadatumURL(client, id, key), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Metadatum requests the key-value pair with the given key for the given +// server ID. +func Metadatum(client *gophercloud.ServiceClient, id, key string) (r GetMetadatumResult) { + _, r.Err = client.Get(metadatumURL(client, id, key), &r.Body, nil) + return +} + +// DeleteMetadatum will delete the key-value pair with the given key for the +// given server ID. +func DeleteMetadatum(client *gophercloud.ServiceClient, id, key string) (r DeleteMetadatumResult) { + _, r.Err = client.Delete(metadatumURL(client, id, key), nil) + return +} + +// ListAddresses makes a request against the API to list the servers IP +// addresses. +func ListAddresses(client *gophercloud.ServiceClient, id string) pagination.Pager { + return pagination.NewPager(client, listAddressesURL(client, id), func(r pagination.PageResult) pagination.Page { + return AddressPage{pagination.SinglePageBase(r)} + }) +} + +// ListAddressesByNetwork makes a request against the API to list the servers IP +// addresses for the given network. +func ListAddressesByNetwork(client *gophercloud.ServiceClient, id, network string) pagination.Pager { + return pagination.NewPager(client, listAddressesByNetworkURL(client, id, network), func(r pagination.PageResult) pagination.Page { + return NetworkAddressPage{pagination.SinglePageBase(r)} + }) +} + +// CreateImageOptsBuilder allows extensions to add additional parameters to the +// CreateImage request. +type CreateImageOptsBuilder interface { + ToServerCreateImageMap() (map[string]interface{}, error) +} + +// CreateImageOpts provides options to pass to the CreateImage request. +type CreateImageOpts struct { + // Name of the image/snapshot. + Name string `json:"name" required:"true"` + + // Metadata contains key-value pairs (up to 255 bytes each) to attach to + // the created image. + Metadata map[string]string `json:"metadata,omitempty"` +} + +// ToServerCreateImageMap formats a CreateImageOpts structure into a request +// body. 
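//
// A minimal usage sketch (computeClient and the IDs are assumed placeholders):
//
//	snapshotOpts := servers.CreateImageOpts{Name: "snapshot_name"}
//	imageID, err := servers.CreateImage(computeClient, "server-uuid", snapshotOpts).ExtractImageID()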
+func (opts CreateImageOpts) ToServerCreateImageMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "createImage") +} + +// CreateImage makes a request against the nova API to schedule an image to be +// created of the server +func CreateImage(client *gophercloud.ServiceClient, id string, opts CreateImageOptsBuilder) (r CreateImageResult) { + b, err := opts.ToServerCreateImageMap() + if err != nil { + r.Err = err + return + } + resp, err := client.Post(actionURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + r.Err = err + r.Header = resp.Header + return +} + +// IDFromName is a convienience function that returns a server's ID given its +// name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + allPages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractServers(allPages) + if err != nil { + return "", err + } + + for _, f := range all { + if f.Name == name { + count++ + id = f.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "server"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "server"} + } +} + +// GetPassword makes a request against the nova API to get the encrypted +// administrative password. +func GetPassword(client *gophercloud.ServiceClient, serverId string) (r GetPasswordResult) { + _, r.Err = client.Get(passwordURL(client, serverId), &r.Body, nil) + return +} + +// ShowConsoleOutputOptsBuilder is the interface types must satisfy in order to be +// used as ShowConsoleOutput options +type ShowConsoleOutputOptsBuilder interface { + ToServerShowConsoleOutputMap() (map[string]interface{}, error) +} + +// ShowConsoleOutputOpts satisfies the ShowConsoleOutputOptsBuilder +type ShowConsoleOutputOpts struct { + // The number of lines to fetch from the end of console log. + // All lines will be returned if this is not specified. + Length int `json:"length,omitempty"` +} + +// ToServerShowConsoleOutputMap formats a ShowConsoleOutputOpts structure into a request body. 
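//
// An illustrative sketch that fetches the last 50 lines of the console log
// (computeClient and the server ID are assumed placeholders):
//
//	outputOpts := servers.ShowConsoleOutputOpts{Length: 50}
//	output, err := servers.ShowConsoleOutput(computeClient, "server-uuid", outputOpts).Extract()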
+func (opts ShowConsoleOutputOpts) ToServerShowConsoleOutputMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "os-getConsoleOutput") +} + +// ShowConsoleOutput makes a request against the nova API to get console log from the server +func ShowConsoleOutput(client *gophercloud.ServiceClient, id string, opts ShowConsoleOutputOptsBuilder) (r ShowConsoleOutputResult) { + b, err := opts.ToServerShowConsoleOutputMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(actionURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go new file mode 100644 index 000000000000..f973d1ea0e1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/results.go @@ -0,0 +1,414 @@ +package servers + +import ( + "crypto/rsa" + "encoding/base64" + "encoding/json" + "fmt" + "net/url" + "path" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type serverResult struct { + gophercloud.Result +} + +// Extract interprets any serverResult as a Server, if possible. +func (r serverResult) Extract() (*Server, error) { + var s Server + err := r.ExtractInto(&s) + return &s, err +} + +func (r serverResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "server") +} + +func ExtractServersInto(r pagination.Page, v interface{}) error { + return r.(ServerPage).Result.ExtractIntoSlicePtr(v, "servers") +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as a Server. +type CreateResult struct { + serverResult +} + +// GetResult is the response from a Get operation. Call its Extract +// method to interpret it as a Server. +type GetResult struct { + serverResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Server. +type UpdateResult struct { + serverResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// RebuildResult is the response from a Rebuild operation. Call its Extract +// method to interpret it as a Server. +type RebuildResult struct { + serverResult +} + +// ActionResult represents the result of server action operations, like reboot. +// Call its ExtractErr method to determine if the action succeeded or failed. +type ActionResult struct { + gophercloud.ErrResult +} + +// CreateImageResult is the response from a CreateImage operation. Call its +// ExtractImageID method to retrieve the ID of the newly created image. +type CreateImageResult struct { + gophercloud.Result +} + +// ShowConsoleOutputResult represents the result of console output from a server +type ShowConsoleOutputResult struct { + gophercloud.Result +} + +// Extract will return the console output from a ShowConsoleOutput request. +func (r ShowConsoleOutputResult) Extract() (string, error) { + var s struct { + Output string `json:"output"` + } + + err := r.ExtractInto(&s) + return s.Output, err +} + +// GetPasswordResult represent the result of a get os-server-password operation. +// Call its ExtractPassword method to retrieve the password. 
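//
// A minimal usage sketch (computeClient, the server ID, and privateKey, an
// *rsa.PrivateKey matching the keypair used at boot, are assumed placeholders):
//
//	pwd, err := servers.GetPassword(computeClient, "server-uuid").ExtractPassword(privateKey)
//	// Pass nil instead of privateKey to receive the still-encrypted password.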
+type GetPasswordResult struct { + gophercloud.Result +} + +// ExtractPassword gets the encrypted password. +// If privateKey != nil the password is decrypted with the private key. +// If privateKey == nil the encrypted password is returned and can be decrypted +// with: +// echo '' | base64 -D | openssl rsautl -decrypt -inkey +func (r GetPasswordResult) ExtractPassword(privateKey *rsa.PrivateKey) (string, error) { + var s struct { + Password string `json:"password"` + } + err := r.ExtractInto(&s) + if err == nil && privateKey != nil && s.Password != "" { + return decryptPassword(s.Password, privateKey) + } + return s.Password, err +} + +func decryptPassword(encryptedPassword string, privateKey *rsa.PrivateKey) (string, error) { + b64EncryptedPassword := make([]byte, base64.StdEncoding.DecodedLen(len(encryptedPassword))) + + n, err := base64.StdEncoding.Decode(b64EncryptedPassword, []byte(encryptedPassword)) + if err != nil { + return "", fmt.Errorf("Failed to base64 decode encrypted password: %s", err) + } + password, err := rsa.DecryptPKCS1v15(nil, privateKey, b64EncryptedPassword[0:n]) + if err != nil { + return "", fmt.Errorf("Failed to decrypt password: %s", err) + } + + return string(password), nil +} + +// ExtractImageID gets the ID of the newly created server image from the header. +func (r CreateImageResult) ExtractImageID() (string, error) { + if r.Err != nil { + return "", r.Err + } + // Get the image id from the header + u, err := url.ParseRequestURI(r.Header.Get("Location")) + if err != nil { + return "", err + } + imageID := path.Base(u.Path) + if imageID == "." || imageID == "/" { + return "", fmt.Errorf("Failed to parse the ID of newly created image: %s", u) + } + return imageID, nil +} + +// Server represents a server/instance in the OpenStack cloud. +type Server struct { + // ID uniquely identifies this server amongst all other servers, + // including those not accessible to the current tenant. + ID string `json:"id"` + + // TenantID identifies the tenant owning this server resource. + TenantID string `json:"tenant_id"` + + // UserID uniquely identifies the user account owning the tenant. + UserID string `json:"user_id"` + + // Name contains the human-readable name for the server. + Name string `json:"name"` + + // Updated and Created contain ISO-8601 timestamps of when the state of the + // server last changed, and when it was created. + Updated time.Time `json:"updated"` + Created time.Time `json:"created"` + + // HostID is the host where the server is located in the cloud. + HostID string `json:"hostid"` + + // Status contains the current operational status of the server, + // such as IN_PROGRESS or ACTIVE. + Status string `json:"status"` + + // Progress ranges from 0..100. + // A request made against the server completes only once Progress reaches 100. + Progress int `json:"progress"` + + // AccessIPv4 and AccessIPv6 contain the IP addresses of the server, + // suitable for remote access for administration. + AccessIPv4 string `json:"accessIPv4"` + AccessIPv6 string `json:"accessIPv6"` + + // Image refers to a JSON object, which itself indicates the OS image used to + // deploy the server. + Image map[string]interface{} `json:"-"` + + // Flavor refers to a JSON object, which itself indicates the hardware + // configuration of the deployed server. + Flavor map[string]interface{} `json:"flavor"` + + // Addresses includes a list of all IP addresses assigned to the server, + // keyed by pool. 
+ Addresses map[string]interface{} `json:"addresses"` + + // Metadata includes a list of all user-specified key-value pairs attached + // to the server. + Metadata map[string]string `json:"metadata"` + + // Links includes HTTP references to the itself, useful for passing along to + // other APIs that might want a server reference. + Links []interface{} `json:"links"` + + // KeyName indicates which public key was injected into the server on launch. + KeyName string `json:"key_name"` + + // AdminPass will generally be empty (""). However, it will contain the + // administrative password chosen when provisioning a new server without a + // set AdminPass setting in the first place. + // Note that this is the ONLY time this field will be valid. + AdminPass string `json:"adminPass"` + + // SecurityGroups includes the security groups that this instance has applied + // to it. + SecurityGroups []map[string]interface{} `json:"security_groups"` + + // Fault contains failure information about a server. + Fault Fault `json:"fault"` +} + +type Fault struct { + Code int `json:"code"` + Created time.Time `json:"created"` + Details string `json:"details"` + Message string `json:"message"` +} + +func (r *Server) UnmarshalJSON(b []byte) error { + type tmp Server + var s struct { + tmp + Image interface{} `json:"image"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Server(s.tmp) + + switch t := s.Image.(type) { + case map[string]interface{}: + r.Image = t + case string: + switch t { + case "": + r.Image = nil + } + } + + return err +} + +// ServerPage abstracts the raw results of making a List() request against +// the API. As OpenStack extensions may freely alter the response bodies of +// structures returned to the client, you may only safely access the data +// provided through the ExtractServers call. +type ServerPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if a page contains no Server results. +func (r ServerPage) IsEmpty() (bool, error) { + s, err := ExtractServers(r) + return len(s) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r ServerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"servers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractServers interprets the results of a single page from a List() call, +// producing a slice of Server entities. +func ExtractServers(r pagination.Page) ([]Server, error) { + var s []Server + err := ExtractServersInto(r, &s) + return s, err +} + +// MetadataResult contains the result of a call for (potentially) multiple +// key-value pairs. Call its Extract method to interpret it as a +// map[string]interface. +type MetadataResult struct { + gophercloud.Result +} + +// GetMetadataResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetMetadataResult struct { + MetadataResult +} + +// ResetMetadataResult contains the result of a Reset operation. Call its +// Extract method to interpret it as a map[string]interface. +type ResetMetadataResult struct { + MetadataResult +} + +// UpdateMetadataResult contains the result of an Update operation. Call its +// Extract method to interpret it as a map[string]interface. 
+type UpdateMetadataResult struct { + MetadataResult +} + +// MetadatumResult contains the result of a call for individual a single +// key-value pair. +type MetadatumResult struct { + gophercloud.Result +} + +// GetMetadatumResult contains the result of a Get operation. Call its Extract +// method to interpret it as a map[string]interface. +type GetMetadatumResult struct { + MetadatumResult +} + +// CreateMetadatumResult contains the result of a Create operation. Call its +// Extract method to interpret it as a map[string]interface. +type CreateMetadatumResult struct { + MetadatumResult +} + +// DeleteMetadatumResult contains the result of a Delete operation. Call its +// ExtractErr method to determine if the call succeeded or failed. +type DeleteMetadatumResult struct { + gophercloud.ErrResult +} + +// Extract interprets any MetadataResult as a Metadata, if possible. +func (r MetadataResult) Extract() (map[string]string, error) { + var s struct { + Metadata map[string]string `json:"metadata"` + } + err := r.ExtractInto(&s) + return s.Metadata, err +} + +// Extract interprets any MetadatumResult as a Metadatum, if possible. +func (r MetadatumResult) Extract() (map[string]string, error) { + var s struct { + Metadatum map[string]string `json:"meta"` + } + err := r.ExtractInto(&s) + return s.Metadatum, err +} + +// Address represents an IP address. +type Address struct { + Version int `json:"version"` + Address string `json:"addr"` +} + +// AddressPage abstracts the raw results of making a ListAddresses() request +// against the API. As OpenStack extensions may freely alter the response bodies +// of structures returned to the client, you may only safely access the data +// provided through the ExtractAddresses call. +type AddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if an AddressPage contains no networks. +func (r AddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractAddresses(r) + return len(addresses) == 0, err +} + +// ExtractAddresses interprets the results of a single page from a +// ListAddresses() call, producing a map of addresses. +func ExtractAddresses(r pagination.Page) (map[string][]Address, error) { + var s struct { + Addresses map[string][]Address `json:"addresses"` + } + err := (r.(AddressPage)).ExtractInto(&s) + return s.Addresses, err +} + +// NetworkAddressPage abstracts the raw results of making a +// ListAddressesByNetwork() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures +// returned to the client, you may only safely access the data provided through +// the ExtractAddresses call. +type NetworkAddressPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a NetworkAddressPage contains no addresses. +func (r NetworkAddressPage) IsEmpty() (bool, error) { + addresses, err := ExtractNetworkAddresses(r) + return len(addresses) == 0, err +} + +// ExtractNetworkAddresses interprets the results of a single page from a +// ListAddressesByNetwork() call, producing a slice of addresses. 
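//
// An illustrative sketch (computeClient, the server ID, and the network name
// are assumed placeholders):
//
//	page, err := servers.ListAddressesByNetwork(computeClient, "server-uuid", "private").AllPages()
//	if err != nil {
//		panic(err)
//	}
//	addrs, err := servers.ExtractNetworkAddresses(page)
//	// Each Address carries the IP version and the address string.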
+func ExtractNetworkAddresses(r pagination.Page) ([]Address, error) { + var s map[string][]Address + err := (r.(NetworkAddressPage)).ExtractInto(&s) + if err != nil { + return nil, err + } + + var key string + for k := range s { + key = k + } + + return s[key], err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/doc.go new file mode 100644 index 000000000000..b3fee3aaccab --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/doc.go @@ -0,0 +1,2 @@ +// servers unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/fixtures.go new file mode 100644 index 000000000000..1d4e99c06b58 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/fixtures.go @@ -0,0 +1,1082 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ServerListBody contains the canned body of a servers.List response. +const ServerListBody = ` +{ + "servers": [ + { + "status": "ACTIVE", + "updated": "2014-09-25T13:10:10Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", + "version": 4, + "addr": "10.0.0.32", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001e", + "OS-SRV-USG:launched_at": "2014-09-25T13:10:10.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "herp", + "created": "2014-09-25T13:10:02Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": 
"fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "derp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + }, + { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": "", + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "merp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + } + ] +} +` + +// SingleServerBody is the canned body of a Get request on an existing server. 
+const SingleServerBody = ` +{ + "server": { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "derp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {} + } +} +` + +// FaultyServerBody is the body of a Get request on an existing server +// which has a fault/error. 
+const FaultyServerBody = ` +{ + "server": { + "status": "ACTIVE", + "updated": "2014-09-25T13:04:49Z", + "hostId": "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + "OS-EXT-SRV-ATTR:host": "devstack", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": 4, + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "links": [ + { + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self" + }, + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark" + } + ], + "key_name": null, + "image": { + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark" + } + ] + }, + "OS-EXT-STS:task_state": null, + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": "instance-0000001d", + "OS-SRV-USG:launched_at": "2014-09-25T13:04:49.000000", + "OS-EXT-SRV-ATTR:hypervisor_hostname": "devstack", + "flavor": { + "id": "1", + "links": [ + { + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark" + } + ] + }, + "id": "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "security_groups": [ + { + "name": "default" + } + ], + "OS-SRV-USG:terminated_at": null, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "9349aff8be7545ac9d2f1d00999a23cd", + "name": "derp", + "created": "2014-09-25T13:04:41Z", + "tenant_id": "fcad67a6189847c4aecfa3c81a05783b", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {}, + "fault": { + "message": "Conflict updating instance c2ce4dea-b73f-4d01-8633-2c6032869281. Expected: {'task_state': [u'spawning']}. Actual: {'task_state': None}", + "code": 500, + "created": "2017-11-11T07:58:39Z", + "details": "Stock details for test" + } + } +} +` + +const ServerPasswordBody = ` +{ + "password": "xlozO3wLCBRWAa2yDjCCVx8vwNPypxnypmRYDa/zErlQ+EzPe1S/Gz6nfmC52mOlOSCRuUOmG7kqqgejPof6M7bOezS387zjq4LSvvwp28zUknzy4YzfFGhnHAdai3TxUJ26pfQCYrq8UTzmKF2Bq8ioSEtVVzM0A96pDh8W2i7BOz6MdoiVyiev/I1K2LsuipfxSJR7Wdke4zNXJjHHP2RfYsVbZ/k9ANu+Nz4iIH8/7Cacud/pphH7EjrY6a4RZNrjQskrhKYed0YERpotyjYk1eDtRe72GrSiXteqCM4biaQ5w3ruS+AcX//PXk3uJ5kC7d67fPXaVz4WaQRYMg==" +} +` + +const ConsoleOutputBody = `{ + "output": "abc" +}` + +var ( + herpTimeCreated, _ = time.Parse(time.RFC3339, "2014-09-25T13:10:02Z") + herpTimeUpdated, _ = time.Parse(time.RFC3339, "2014-09-25T13:10:10Z") + // ServerHerp is a Server struct that should correspond to the first result in ServerListBody. 
+ ServerHerp = servers.Server{ + Status: "ACTIVE", + Updated: herpTimeUpdated, + HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:7c:1b:2b", + "version": float64(4), + "addr": "10.0.0.32", + "OS-EXT-IPS:type": "fixed", + }, + }, + }, + Links: []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "self", + }, + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + "rel": "bookmark", + }, + }, + Image: map[string]interface{}{ + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "1", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark", + }, + }, + }, + ID: "ef079b0c-e610-4dfb-b1aa-b49f07ac48e5", + UserID: "9349aff8be7545ac9d2f1d00999a23cd", + Name: "herp", + Created: herpTimeCreated, + TenantID: "fcad67a6189847c4aecfa3c81a05783b", + Metadata: map[string]string{}, + SecurityGroups: []map[string]interface{}{ + map[string]interface{}{ + "name": "default", + }, + }, + } + + derpTimeCreated, _ = time.Parse(time.RFC3339, "2014-09-25T13:04:41Z") + derpTimeUpdated, _ = time.Parse(time.RFC3339, "2014-09-25T13:04:49Z") + // ServerDerp is a Server struct that should correspond to the second server in ServerListBody. 
+ ServerDerp = servers.Server{ + Status: "ACTIVE", + Updated: derpTimeUpdated, + HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": float64(4), + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed", + }, + }, + }, + Links: []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self", + }, + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark", + }, + }, + Image: map[string]interface{}{ + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "rel": "bookmark", + }, + }, + }, + Flavor: map[string]interface{}{ + "id": "1", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark", + }, + }, + }, + ID: "9e5476bd-a4ec-4653-93d6-72c93aa682ba", + UserID: "9349aff8be7545ac9d2f1d00999a23cd", + Name: "derp", + Created: derpTimeCreated, + TenantID: "fcad67a6189847c4aecfa3c81a05783b", + Metadata: map[string]string{}, + SecurityGroups: []map[string]interface{}{ + map[string]interface{}{ + "name": "default", + }, + }, + } + + ConsoleOutput = "abc" + + merpTimeCreated, _ = time.Parse(time.RFC3339, "2014-09-25T13:04:41Z") + merpTimeUpdated, _ = time.Parse(time.RFC3339, "2014-09-25T13:04:49Z") + // ServerMerp is a Server struct that should correspond to the second server in ServerListBody. + ServerMerp = servers.Server{ + Status: "ACTIVE", + Updated: merpTimeUpdated, + HostID: "29d3c8c896a45aa4c34e52247875d7fefc3d94bbcc9f622b5d204362", + Addresses: map[string]interface{}{ + "private": []interface{}{ + map[string]interface{}{ + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:9e:89:be", + "version": float64(4), + "addr": "10.0.0.31", + "OS-EXT-IPS:type": "fixed", + }, + }, + }, + Links: []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/v2/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "self", + }, + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/servers/9e5476bd-a4ec-4653-93d6-72c93aa682ba", + "rel": "bookmark", + }, + }, + Image: nil, + Flavor: map[string]interface{}{ + "id": "1", + "links": []interface{}{ + map[string]interface{}{ + "href": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/flavors/1", + "rel": "bookmark", + }, + }, + }, + ID: "9e5476bd-a4ec-4653-93d6-72c93aa682bb", + UserID: "9349aff8be7545ac9d2f1d00999a23cd", + Name: "merp", + Created: merpTimeCreated, + TenantID: "fcad67a6189847c4aecfa3c81a05783b", + Metadata: map[string]string{}, + SecurityGroups: []map[string]interface{}{ + map[string]interface{}{ + "name": "default", + }, + }, + } + + faultTimeCreated, _ = time.Parse(time.RFC3339, "2017-11-11T07:58:39Z") + DerpFault = servers.Fault{ + Code: 500, + Created: faultTimeCreated, + Details: "Stock details for test", + Message: "Conflict updating instance c2ce4dea-b73f-4d01-8633-2c6032869281. " + + "Expected: {'task_state': [u'spawning']}. 
Actual: {'task_state': None}", + } +) + +type CreateOptsWithCustomField struct { + servers.CreateOpts + Foo string `json:"foo,omitempty"` +} + +func (opts CreateOptsWithCustomField) ToServerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "server") +} + +// HandleServerCreationSuccessfully sets up the test server to respond to a server creation request +// with a given response. +func HandleServerCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "server": { + "name": "derp", + "imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb", + "flavorRef": "1" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) + + th.Mux.HandleFunc("/images/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "images": [ + { + "status": "ACTIVE", + "updated": "2014-09-23T12:54:56Z", + "id": "f3e4a95d-1f4f-4989-97ce-f3a1fb8c04d7", + "OS-EXT-IMG-SIZE:size": 476704768, + "name": "F17-x86_64-cfntools", + "created": "2014-09-23T12:54:52Z", + "minDisk": 0, + "progress": 100, + "minRam": 0 + }, + { + "status": "ACTIVE", + "updated": "2014-09-23T12:51:43Z", + "id": "f90f6034-2570-4974-8351-6b49732ef2eb", + "OS-EXT-IMG-SIZE:size": 13167616, + "name": "cirros-0.3.2-x86_64-disk", + "created": "2014-09-23T12:51:42Z", + "minDisk": 0, + "progress": 100, + "minRam": 0 + } + ] + } + `) + case "2": + fmt.Fprintf(w, `{ "images": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) + + th.Mux.HandleFunc("/flavors/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "flavors": [ + { + "id": "1", + "name": "m1.tiny", + "disk": 1, + "ram": 512, + "vcpus": 1, + "swap":"" + }, + { + "id": "2", + "name": "m2.small", + "disk": 10, + "ram": 1024, + "vcpus": 2, + "swap": 1000 + } + ], + "flavors_links": [ + { + "href": "%s/flavors/detail?marker=2", + "rel": "next" + } + ] + } + `, th.Server.URL) + case "2": + fmt.Fprintf(w, `{ "flavors": [] }`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleServerCreationWithCustomFieldSuccessfully sets up the test server to respond to a server creation request +// with a given response. +func HandleServerCreationWithCustomFieldSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "server": { + "name": "derp", + "imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb", + "flavorRef": "1", + "foo": "bar" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleServerCreationWithUserdata sets up the test server to respond to a server creation request +// with a given response. 
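+//
+// The "user_data" value asserted below is the base64 encoding of the raw
+// string "userdata string". A quick sketch to confirm it (not part of the
+// fixture itself):
+//
+//  enc := base64.StdEncoding.EncodeToString([]byte("userdata string"))
+//  // enc == "dXNlcmRhdGEgc3RyaW5n"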
+func HandleServerCreationWithUserdata(t *testing.T, response string) { + th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "server": { + "name": "derp", + "imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb", + "flavorRef": "1", + "user_data": "dXNlcmRhdGEgc3RyaW5n" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleServerCreationWithMetadata sets up the test server to respond to a server creation request +// with a given response. +func HandleServerCreationWithMetadata(t *testing.T, response string) { + th.Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "server": { + "name": "derp", + "imageRef": "f90f6034-2570-4974-8351-6b49732ef2eb", + "flavorRef": "1", + "metadata": { + "abc": "def" + } + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleServerListSuccessfully sets up the test server to respond to a server List request. +func HandleServerListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ServerListBody) + case "9e5476bd-a4ec-4653-93d6-72c93aa682ba": + fmt.Fprintf(w, `{ "servers": [] }`) + default: + t.Fatalf("/servers/detail invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleServerDeletionSuccessfully sets up the test server to respond to a server deletion request. +func HandleServerDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/asdfasdfasdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleServerForceDeletionSuccessfully sets up the test server to respond to a server force deletion +// request. +func HandleServerForceDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/asdfasdfasdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "forceDelete": "" }`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleServerGetSuccessfully sets up the test server to respond to a server Get request. +func HandleServerGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleServerBody) + }) +} + +// HandleServerGetFaultSuccessfully sets up the test server to respond to a server Get +// request which contains a fault. 
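+//
+// In a test this handler is typically paired with a plain Get call, and the
+// Fault field of the result is compared against the DerpFault fixture
+// (sketch, mirroring TestGetFaultyServer in this package):
+//
+//  actual, err := servers.Get(client.ServiceClient(), "1234asdf").Extract()
+//  // on success, actual.Fault should deep-equal DerpFault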
+func HandleServerGetFaultSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, FaultyServerBody) + }) +} + +// HandleServerUpdateSuccessfully sets up the test server to respond to a server Update request. +func HandleServerUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ "server": { "name": "new-name" } }`) + + fmt.Fprintf(w, SingleServerBody) + }) +} + +// HandleAdminPasswordChangeSuccessfully sets up the test server to respond to a server password +// change request. +func HandleAdminPasswordChangeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "changePassword": { "adminPass": "new-password" } }`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleRebootSuccessfully sets up the test server to respond to a reboot request with success. +func HandleRebootSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "reboot": { "type": "SOFT" } }`) + + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleShowConsoleOutputSuccessfully sets up the test server to respond to a os-getConsoleOutput request with success. +func HandleShowConsoleOutputSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "os-getConsoleOutput": { "length": 50 } }`) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleRebuildSuccessfully sets up the test server to respond to a rebuild request with success. +func HandleRebuildSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "rebuild": { + "name": "new-name", + "adminPass": "swordfish", + "imageRef": "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + "accessIPv4": "1.2.3.4" + } + } + `) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleMetadatumGetSuccessfully sets up the test server to respond to a metadatum Get request. 
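+//
+// The request it exercises looks roughly like this (sketch):
+//
+//  m, err := servers.Metadatum(client.ServiceClient(), "1234asdf", "foo").Extract()
+//  // m == map[string]string{"foo": "bar"} when this handler responds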
+func HandleMetadatumGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + w.Write([]byte(`{ "meta": {"foo":"bar"}}`)) + }) +} + +// HandleMetadatumCreateSuccessfully sets up the test server to respond to a metadatum Create request. +func HandleMetadatumCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "meta": { + "foo": "bar" + } + }`) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + w.Write([]byte(`{ "meta": {"foo":"bar"}}`)) + }) +} + +// HandleMetadatumDeleteSuccessfully sets up the test server to respond to a metadatum Delete request. +func HandleMetadatumDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/metadata/foo", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleMetadataGetSuccessfully sets up the test server to respond to a metadata Get request. +func HandleMetadataGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.WriteHeader(http.StatusOK) + w.Write([]byte(`{ "metadata": {"foo":"bar", "this":"that"}}`)) + }) +} + +// HandleMetadataResetSuccessfully sets up the test server to respond to a metadata Create request. +func HandleMetadataResetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "metadata": { + "foo": "bar", + "this": "that" + } + }`) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + w.Write([]byte(`{ "metadata": {"foo":"bar", "this":"that"}}`)) + }) +} + +// HandleMetadataUpdateSuccessfully sets up the test server to respond to a metadata Update request. +func HandleMetadataUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "metadata": { + "foo": "baz", + "this": "those" + } + }`) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + w.Write([]byte(`{ "metadata": {"foo":"baz", "this":"those"}}`)) + }) +} + +// ListAddressesExpected represents an expected repsonse from a ListAddresses request. +var ListAddressesExpected = map[string][]servers.Address{ + "public": []servers.Address{ + { + Version: 4, + Address: "50.56.176.35", + }, + { + Version: 6, + Address: "2001:4800:790e:510:be76:4eff:fe04:84a8", + }, + }, + "private": []servers.Address{ + { + Version: 4, + Address: "10.180.3.155", + }, + }, +} + +// HandleAddressListSuccessfully sets up the test server to respond to a ListAddresses request. 
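+//
+// A minimal consumer of this handler (sketch, mirroring TestListAddresses):
+//
+//  err := servers.ListAddresses(client.ServiceClient(), "asdfasdfasdf").EachPage(
+//      func(page pagination.Page) (bool, error) {
+//          addrs, err := servers.ExtractAddresses(page)
+//          // addrs should deep-equal ListAddressesExpected
+//          return true, err
+//      })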
+func HandleAddressListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/asdfasdfasdf/ips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "addresses": { + "public": [ + { + "version": 4, + "addr": "50.56.176.35" + }, + { + "version": 6, + "addr": "2001:4800:790e:510:be76:4eff:fe04:84a8" + } + ], + "private": [ + { + "version": 4, + "addr": "10.180.3.155" + } + ] + } + }`) + }) +} + +// ListNetworkAddressesExpected represents an expected repsonse from a ListAddressesByNetwork request. +var ListNetworkAddressesExpected = []servers.Address{ + { + Version: 4, + Address: "50.56.176.35", + }, + { + Version: 6, + Address: "2001:4800:790e:510:be76:4eff:fe04:84a8", + }, +} + +// HandleNetworkAddressListSuccessfully sets up the test server to respond to a ListAddressesByNetwork request. +func HandleNetworkAddressListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/asdfasdfasdf/ips/public", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "public": [ + { + "version": 4, + "addr": "50.56.176.35" + }, + { + "version": 6, + "addr": "2001:4800:790e:510:be76:4eff:fe04:84a8" + } + ] + }`) + }) +} + +// HandleCreateServerImageSuccessfully sets up the test server to respond to a TestCreateServerImage request. +func HandleCreateServerImageSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/serverimage/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.Header().Add("Location", "https://0.0.0.0/images/xxxx-xxxxx-xxxxx-xxxx") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandlePasswordGetSuccessfully sets up the test server to respond to a password Get request. 
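+//
+// Exercised by servers.GetPassword (sketch). Passing nil to ExtractPassword
+// returns the password exactly as the API sent it; passing an *rsa.PrivateKey
+// decrypts it first:
+//
+//  res := servers.GetPassword(client.ServiceClient(), "1234asdf")
+//  pwd, err := res.ExtractPassword(nil)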
+func HandlePasswordGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/servers/1234asdf/os-server-password", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, ServerPasswordBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/requests_test.go new file mode 100644 index 000000000000..46ae45dbf460 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/requests_test.go @@ -0,0 +1,552 @@ +package testing + +import ( + "encoding/base64" + "encoding/json" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/availabilityzones" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/diskconfig" + "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/extendedstatus" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListServers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerListSuccessfully(t) + + pages := 0 + err := servers.List(client.ServiceClient(), servers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := servers.ExtractServers(page) + if err != nil { + return false, err + } + + if len(actual) != 3 { + t.Fatalf("Expected 3 servers, got %d", len(actual)) + } + th.CheckDeepEquals(t, ServerHerp, actual[0]) + th.CheckDeepEquals(t, ServerDerp, actual[1]) + th.CheckDeepEquals(t, ServerMerp, actual[2]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllServers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerListSuccessfully(t) + + allPages, err := servers.List(client.ServiceClient(), servers.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := servers.ExtractServers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ServerHerp, actual[0]) + th.CheckDeepEquals(t, ServerDerp, actual[1]) +} + +func TestListAllServersWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerListSuccessfully(t) + + type ServerWithExt struct { + servers.Server + availabilityzones.ServerAvailabilityZoneExt + extendedstatus.ServerExtendedStatusExt + diskconfig.ServerDiskConfigExt + } + + allPages, err := servers.List(client.ServiceClient(), servers.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + var actual []ServerWithExt + err = servers.ExtractServersInto(allPages, &actual) + th.AssertNoErr(t, err) + th.AssertEquals(t, 3, len(actual)) + th.AssertEquals(t, "nova", actual[0].AvailabilityZone) + th.AssertEquals(t, "RUNNING", actual[0].PowerState.String()) + th.AssertEquals(t, "", actual[0].TaskState) + th.AssertEquals(t, "active", actual[0].VmState) + th.AssertEquals(t, diskconfig.Manual, actual[0].DiskConfig) +} + +func TestCreateServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerCreationSuccessfully(t, SingleServerBody) + + actual, err := servers.Create(client.ServiceClient(), servers.CreateOpts{ + Name: "derp", + ImageRef: 
"f90f6034-2570-4974-8351-6b49732ef2eb", + FlavorRef: "1", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestCreateServerWithCustomField(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerCreationWithCustomFieldSuccessfully(t, SingleServerBody) + + actual, err := servers.Create(client.ServiceClient(), CreateOptsWithCustomField{ + CreateOpts: servers.CreateOpts{ + Name: "derp", + ImageRef: "f90f6034-2570-4974-8351-6b49732ef2eb", + FlavorRef: "1", + }, + Foo: "bar", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestCreateServerWithMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerCreationWithMetadata(t, SingleServerBody) + + actual, err := servers.Create(client.ServiceClient(), servers.CreateOpts{ + Name: "derp", + ImageRef: "f90f6034-2570-4974-8351-6b49732ef2eb", + FlavorRef: "1", + Metadata: map[string]string{ + "abc": "def", + }, + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestCreateServerWithUserdataString(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerCreationWithUserdata(t, SingleServerBody) + + actual, err := servers.Create(client.ServiceClient(), servers.CreateOpts{ + Name: "derp", + ImageRef: "f90f6034-2570-4974-8351-6b49732ef2eb", + FlavorRef: "1", + UserData: []byte("userdata string"), + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestCreateServerWithUserdataEncoded(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerCreationWithUserdata(t, SingleServerBody) + + encoded := base64.StdEncoding.EncodeToString([]byte("userdata string")) + + actual, err := servers.Create(client.ServiceClient(), servers.CreateOpts{ + Name: "derp", + ImageRef: "f90f6034-2570-4974-8351-6b49732ef2eb", + FlavorRef: "1", + UserData: []byte(encoded), + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestCreateServerWithImageNameAndFlavorName(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerCreationSuccessfully(t, SingleServerBody) + + actual, err := servers.Create(client.ServiceClient(), servers.CreateOpts{ + Name: "derp", + ImageName: "cirros-0.3.2-x86_64-disk", + FlavorName: "m1.tiny", + ServiceClient: client.ServiceClient(), + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestDeleteServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerDeletionSuccessfully(t) + + res := servers.Delete(client.ServiceClient(), "asdfasdfasdf") + th.AssertNoErr(t, res.Err) +} + +func TestForceDeleteServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerForceDeletionSuccessfully(t) + + res := servers.ForceDelete(client.ServiceClient(), "asdfasdfasdf") + th.AssertNoErr(t, res.Err) +} + +func TestGetServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerGetSuccessfully(t) + + client := client.ServiceClient() + actual, err := servers.Get(client, "1234asdf").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestGetFaultyServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerGetFaultSuccessfully(t) + + client := client.ServiceClient() + actual, err := servers.Get(client, "1234asdf").Extract() + if err != nil { + 
t.Fatalf("Unexpected Get error: %v", err) + } + + FaultyServer := ServerDerp + FaultyServer.Fault = DerpFault + th.CheckDeepEquals(t, FaultyServer, *actual) +} + +func TestGetServerWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerGetSuccessfully(t) + + var s struct { + servers.Server + availabilityzones.ServerAvailabilityZoneExt + extendedstatus.ServerExtendedStatusExt + diskconfig.ServerDiskConfigExt + } + + err := servers.Get(client.ServiceClient(), "1234asdf").ExtractInto(&s) + th.AssertNoErr(t, err) + th.AssertEquals(t, "nova", s.AvailabilityZone) + th.AssertEquals(t, "RUNNING", s.PowerState.String()) + th.AssertEquals(t, "", s.TaskState) + th.AssertEquals(t, "active", s.VmState) + th.AssertEquals(t, diskconfig.Manual, s.DiskConfig) + + err = servers.Get(client.ServiceClient(), "1234asdf").ExtractInto(s) + if err == nil { + t.Errorf("Expected error when providing non-pointer struct") + } +} + +func TestUpdateServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleServerUpdateSuccessfully(t) + + client := client.ServiceClient() + actual, err := servers.Update(client, "1234asdf", servers.UpdateOpts{Name: "new-name"}).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestChangeServerAdminPassword(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAdminPasswordChangeSuccessfully(t) + + res := servers.ChangeAdminPassword(client.ServiceClient(), "1234asdf", "new-password") + th.AssertNoErr(t, res.Err) +} + +func TestShowConsoleOutput(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleShowConsoleOutputSuccessfully(t, ConsoleOutputBody) + + outputOpts := &servers.ShowConsoleOutputOpts{ + Length: 50, + } + actual, err := servers.ShowConsoleOutput(client.ServiceClient(), "1234asdf", outputOpts).Extract() + + th.AssertNoErr(t, err) + th.AssertByteArrayEquals(t, []byte(ConsoleOutput), []byte(actual)) +} + +func TestGetPassword(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePasswordGetSuccessfully(t) + + res := servers.GetPassword(client.ServiceClient(), "1234asdf") + th.AssertNoErr(t, res.Err) +} + +func TestRebootServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRebootSuccessfully(t) + + res := servers.Reboot(client.ServiceClient(), "1234asdf", servers.RebootOpts{ + Type: servers.SoftReboot, + }) + th.AssertNoErr(t, res.Err) +} + +func TestRebuildServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRebuildSuccessfully(t, SingleServerBody) + + opts := servers.RebuildOpts{ + Name: "new-name", + AdminPass: "swordfish", + ImageID: "http://104.130.131.164:8774/fcad67a6189847c4aecfa3c81a05783b/images/f90f6034-2570-4974-8351-6b49732ef2eb", + AccessIPv4: "1.2.3.4", + } + + actual, err := servers.Rebuild(client.ServiceClient(), "1234asdf", opts).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ServerDerp, *actual) +} + +func TestResizeServer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "resize": { "flavorRef": "2" } }`) + + w.WriteHeader(http.StatusAccepted) + }) + + res := servers.Resize(client.ServiceClient(), "1234asdf", servers.ResizeOpts{FlavorRef: "2"}) + th.AssertNoErr(t, res.Err) +} + +func TestConfirmResize(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "confirmResize": null }`) + + w.WriteHeader(http.StatusNoContent) + }) + + res := servers.ConfirmResize(client.ServiceClient(), "1234asdf") + th.AssertNoErr(t, res.Err) +} + +func TestRevertResize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/servers/1234asdf/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ "revertResize": null }`) + + w.WriteHeader(http.StatusAccepted) + }) + + res := servers.RevertResize(client.ServiceClient(), "1234asdf") + th.AssertNoErr(t, res.Err) +} + +func TestGetMetadatum(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadatumGetSuccessfully(t) + + expected := map[string]string{"foo": "bar"} + actual, err := servers.Metadatum(client.ServiceClient(), "1234asdf", "foo").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestCreateMetadatum(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadatumCreateSuccessfully(t) + + expected := map[string]string{"foo": "bar"} + actual, err := servers.CreateMetadatum(client.ServiceClient(), "1234asdf", servers.MetadatumOpts{"foo": "bar"}).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteMetadatum(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadatumDeleteSuccessfully(t) + + err := servers.DeleteMetadatum(client.ServiceClient(), "1234asdf", "foo").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadataGetSuccessfully(t) + + expected := map[string]string{"foo": "bar", "this": "that"} + actual, err := servers.Metadata(client.ServiceClient(), "1234asdf").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestResetMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadataResetSuccessfully(t) + + expected := map[string]string{"foo": "bar", "this": "that"} + actual, err := servers.ResetMetadata(client.ServiceClient(), "1234asdf", servers.MetadataOpts{ + "foo": "bar", + "this": "that", + }).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestUpdateMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleMetadataUpdateSuccessfully(t) + + expected := map[string]string{"foo": "baz", "this": "those"} + actual, err := servers.UpdateMetadata(client.ServiceClient(), "1234asdf", servers.MetadataOpts{ + "foo": "baz", + "this": "those", + }).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestListAddresses(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAddressListSuccessfully(t) + + expected := ListAddressesExpected + pages := 0 + err := servers.ListAddresses(client.ServiceClient(), "asdfasdfasdf").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := servers.ExtractAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 networks, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + 
th.CheckEquals(t, 1, pages) +} + +func TestListAddressesByNetwork(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleNetworkAddressListSuccessfully(t) + + expected := ListNetworkAddressesExpected + pages := 0 + err := servers.ListAddressesByNetwork(client.ServiceClient(), "asdfasdfasdf", "public").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := servers.ExtractNetworkAddresses(page) + th.AssertNoErr(t, err) + + if len(actual) != 2 { + t.Fatalf("Expected 2 addresses, got %d", len(actual)) + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, pages) +} + +func TestCreateServerImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateServerImageSuccessfully(t) + + _, err := servers.CreateImage(client.ServiceClient(), "serverimage", servers.CreateImageOpts{Name: "test"}).ExtractImageID() + th.AssertNoErr(t, err) +} + +func TestMarshalPersonality(t *testing.T) { + name := "/etc/test" + contents := []byte("asdfasdf") + + personality := servers.Personality{ + &servers.File{ + Path: name, + Contents: contents, + }, + } + + data, err := json.Marshal(personality) + if err != nil { + t.Fatal(err) + } + + var actual []map[string]string + err = json.Unmarshal(data, &actual) + if err != nil { + t.Fatal(err) + } + + if len(actual) != 1 { + t.Fatal("expected personality length 1") + } + + if actual[0]["path"] != name { + t.Fatal("file path incorrect") + } + + if actual[0]["contents"] != base64.StdEncoding.EncodeToString(contents) { + t.Fatal("file contents incorrect") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/results_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/results_test.go new file mode 100644 index 000000000000..d4773dc91629 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/testing/results_test.go @@ -0,0 +1,110 @@ +package testing + +import ( + "crypto/rsa" + "encoding/json" + "fmt" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/compute/v2/servers" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" + "golang.org/x/crypto/ssh" +) + +// Fail - No password in JSON. +func TestExtractPassword_no_pwd_data(t *testing.T) { + + var dejson interface{} + err := json.Unmarshal([]byte(`{ "Crappy data": ".-.-." }`), &dejson) + if err != nil { + t.Fatalf("%s", err) + } + resp := servers.GetPasswordResult{Result: gophercloud.Result{Body: dejson}} + + pwd, err := resp.ExtractPassword(nil) + th.AssertEquals(t, pwd, "") +} + +// Ok - return encrypted password when no private key is given. 
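+// With a nil key, ExtractPassword hands back the base64 blob from the API
+// unchanged; the decryption path is covered by the following test.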
+func TestExtractPassword_encrypted_pwd(t *testing.T) { + + var dejson interface{} + sejson := []byte(`{"password":"PP8EnwPO9DhEc8+O/6CKAkPF379mKsUsfFY6yyw0734XXvKsSdV9KbiHQ2hrBvzeZxtGMrlFaikVunCRizyLLWLMuOi4hoH+qy9F9sQid61gQIGkxwDAt85d/7Eau2/KzorFnZhgxArl7IiqJ67X6xjKkR3zur+Yp3V/mtVIehpPYIaAvPbcp2t4mQXl1I9J8yrQfEZOctLL1L4heDEVXnxvNihVLK6pivlVggp6SZCtjj9cduZGrYGsxsOCso1dqJQr7GCojfwvuLOoG0OYwEGuWVTZppxWxi/q1QgeHFhGKA5QUXlz7pS71oqpjYsTeViuHnfvlqb5TVYZpQ1haw=="}`) + + err := json.Unmarshal(sejson, &dejson) + fmt.Printf("%v\n", dejson) + if err != nil { + t.Fatalf("%s", err) + } + resp := servers.GetPasswordResult{Result: gophercloud.Result{Body: dejson}} + + pwd, err := resp.ExtractPassword(nil) + th.AssertNoErr(t, err) + th.AssertEquals(t, "PP8EnwPO9DhEc8+O/6CKAkPF379mKsUsfFY6yyw0734XXvKsSdV9KbiHQ2hrBvzeZxtGMrlFaikVunCRizyLLWLMuOi4hoH+qy9F9sQid61gQIGkxwDAt85d/7Eau2/KzorFnZhgxArl7IiqJ67X6xjKkR3zur+Yp3V/mtVIehpPYIaAvPbcp2t4mQXl1I9J8yrQfEZOctLL1L4heDEVXnxvNihVLK6pivlVggp6SZCtjj9cduZGrYGsxsOCso1dqJQr7GCojfwvuLOoG0OYwEGuWVTZppxWxi/q1QgeHFhGKA5QUXlz7pS71oqpjYsTeViuHnfvlqb5TVYZpQ1haw==", pwd) +} + +// Ok - return decrypted password when private key is given. +// Decrytion can be verified by: +// echo "" | base64 -D | openssl rsautl -decrypt -inkey +func TestExtractPassword_decrypted_pwd(t *testing.T) { + + privateKey, err := ssh.ParseRawPrivateKey([]byte(` +-----BEGIN RSA PRIVATE KEY----- +MIIEpQIBAAKCAQEAo1ODZgwMVdTJYim9UYuYhowoPMhGEuV5IRZjcJ315r7RBSC+ +yEiBb1V+jhf+P8fzAyU35lkBzZGDr7E3jxSesbOuYT8cItQS4ErUnI1LGuqvMxwv +X3GMyE/HmOcaiODF1XZN3Ur5pMJdVknnmczgUsW0hT98Udrh3MQn9WSuh/6LRy6+ +x1QsKHOCLFPnkhWa3LKyxmpQq/Gvhz+6NLe+gt8MFullA5mKQxBJ/K6laVHeaMlw +JG3GCX0EZhRlvzoV8koIBKZtbKFolFr8ZtxBm3R5LvnyrtOvp22sa+xeItUT5kG1 +ZnbGNdK87oYW+VigEUfzT/+8R1i6E2QIXoeZiQIDAQABAoIBAQCVZ70IqbbTAW8j +RAlyQh/J3Qal65LmkFJJKUDX8TfT1/Q/G6BKeMEmxm+Zrmsfj1pHI1HKftt+YEG1 +g4jOc09kQXkgbmnfll6aHPn3J+1vdwXD3GGdjrL5PrnYrngAhJWU2r8J0x8hT8ew +OrUJZXhDX6XuSpAAFRmOKUZgXbSmo4X+LZX76ACnarselJt5FL724ECvpWJ7xxC4 +FMzvp4RqMmNFvv/Uq9lE/EmoSk4dviYyIZZ16DbDNyc9k/sGqCAMktCEwZ3EQm// +S5bkNhgP6oUXjluWy53aPRgykEylgDWo5SSdSEyKnw/fciU0xdprA9JrBGIcTyHS +/k2kgD4xAoGBANTkJ88Q0YrxX3fZNZVqcn00XKTxPGmxN5LRs7eV743q30AxK5Db +QU8iwaAA1IKUWV5DLhgUTNsDCOPUPue4aOSBD3/sj+WEmvIhj7afDL5didkYHsqf +fDnhFHq7y/3i57d428C7BwwR79pGWVyi7vH3pfu9A1iwl1aNOae+zvbVAoGBAMRm +AmwQ9fJ3Qc44jysFK/yliLRGdShjkMMah5G3JlrelwfPtwPwEL2EHHhJB/C1acMs +n6Q6RaoF6WNSZUY65ksQg7aPOYf2X0FTFwQJvwDJ4qlWjmq7w+tQ0AoGJG+dVUmQ +zHZ/Y+HokSXzz9c4oevk4v/rMgAQ00WHrTdtIhnlAoGBALIJJ72D7CkNGHCq5qPQ +xHQukPejgolFGhufYXM7YX3GmPMe67cVlTVv9Isxhoa5N0+cUPT0LR3PGOUm/4Bb +eOT3hZXOqLwhvE6XgI8Rzd95bClwgXekDoh80dqeKMdmta961BQGlKskaPiacmsF +G1yhZV70P9Mwwy8vpbLB4GUNAoGAbTwbjsWkNfa0qCF3J8NZoszjCvnBQfSW2J1R +1+8ZKyNwt0yFi3Ajr3TibNiZzPzp1T9lj29FvfpJxA9Y+sXZvthxmcFxizix5GB1 +ha5yCNtA8VSOI7lJkAFDpL+j1lyYyjD6N9JE2KqEyKoh6J+8F7sXsqW7CqRRDfQX +mKNfey0CgYEAxcEoNoADN2hRl7qY9rbQfVvQb3RkoQkdHhl9gpLFCcV32IP8R4xg +09NbQK5OmgcIuZhLVNzTmUHJbabEGeXqIFIV0DsqECAt3WzbDyKQO23VJysFD46c +KSde3I0ybDz7iS2EtceKB7m4C0slYd+oBkm4efuF00rCOKDwpFq45m0= +-----END RSA PRIVATE KEY----- +`)) + if err != nil { + t.Fatalf("Error parsing private key: %s\n", err) + } + + var dejson interface{} + sejson := 
[]byte(`{"password":"PP8EnwPO9DhEc8+O/6CKAkPF379mKsUsfFY6yyw0734XXvKsSdV9KbiHQ2hrBvzeZxtGMrlFaikVunCRizyLLWLMuOi4hoH+qy9F9sQid61gQIGkxwDAt85d/7Eau2/KzorFnZhgxArl7IiqJ67X6xjKkR3zur+Yp3V/mtVIehpPYIaAvPbcp2t4mQXl1I9J8yrQfEZOctLL1L4heDEVXnxvNihVLK6pivlVggp6SZCtjj9cduZGrYGsxsOCso1dqJQr7GCojfwvuLOoG0OYwEGuWVTZppxWxi/q1QgeHFhGKA5QUXlz7pS71oqpjYsTeViuHnfvlqb5TVYZpQ1haw=="}`) + + err = json.Unmarshal(sejson, &dejson) + fmt.Printf("%v\n", dejson) + if err != nil { + t.Fatalf("%s", err) + } + resp := servers.GetPasswordResult{Result: gophercloud.Result{Body: dejson}} + + pwd, err := resp.ExtractPassword(privateKey.(*rsa.PrivateKey)) + th.AssertNoErr(t, err) + th.AssertEquals(t, "ruZKK0tqxRfYm5t7lSJq", pwd) +} + +func TestListAddressesAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAddressListSuccessfully(t) + + allPages, err := servers.ListAddresses(client.ServiceClient(), "asdfasdfasdf").AllPages() + th.AssertNoErr(t, err) + _, err = servers.ExtractAddresses(allPages) + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go new file mode 100644 index 000000000000..e892e8d92597 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/urls.go @@ -0,0 +1,51 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers") +} + +func listURL(client *gophercloud.ServiceClient) string { + return createURL(client) +} + +func listDetailURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("servers", "detail") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return deleteURL(client, id) +} + +func actionURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "action") +} + +func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("servers", id, "metadata", key) +} + +func metadataURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "metadata") +} + +func listAddressesURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "ips") +} + +func listAddressesByNetworkURL(client *gophercloud.ServiceClient, id, network string) string { + return client.ServiceURL("servers", id, "ips", network) +} + +func passwordURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("servers", id, "os-server-password") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go new file mode 100644 index 000000000000..cadef054506a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/compute/v2/servers/util.go @@ -0,0 +1,21 @@ +package servers + +import "github.com/gophercloud/gophercloud" + +// WaitForStatus will continually poll a server until it successfully +// transitions to a specified status. It will do this for at most the number +// of seconds specified. 
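+//
+// Typical usage (sketch; "computeClient" is assumed to be an initialized
+// compute *gophercloud.ServiceClient and "serverID" an existing server):
+//
+//  if err := servers.WaitForStatus(computeClient, serverID, "ACTIVE", 300); err != nil {
+//      // the server did not reach ACTIVE within 300 seconds
+//  }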
+func WaitForStatus(c *gophercloud.ServiceClient, id, status string, secs int) error { + return gophercloud.WaitFor(secs, func() (bool, error) { + current, err := Get(c, id).Extract() + if err != nil { + return false, err + } + + if current.Status == status { + return true, nil + } + + return false, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/doc.go new file mode 100644 index 000000000000..5237759eb239 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/doc.go @@ -0,0 +1,4 @@ +// Package capsules contains functionality for working with Zun capsule +// resources. A capsule is a container group, as the co-located and +// co-scheduled unit, is the same like pod in Kubernetes. +package capsules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/errors.go new file mode 100644 index 000000000000..6542e66ec835 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/errors.go @@ -0,0 +1,15 @@ +package capsules + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" +) + +type ErrInvalidDataFormat struct { + gophercloud.BaseError +} + +func (e ErrInvalidDataFormat) Error() string { + return fmt.Sprintf("Data in neither json nor yaml format.") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/requests.go new file mode 100644 index 000000000000..611c27677a0a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/requests.go @@ -0,0 +1,102 @@ +package capsules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToCapsuleCreateMap() (map[string]interface{}, error) +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToCapsuleListQuery() (string, error) +} + +// Get requests details on a single capsule, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + TemplateOpts *Template `json:"-" required:"true"` +} + +// ToCapsuleCreateMap assembles a request body based on the contents of +// a CreateOpts. 
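+//
+// A rough sketch of how callers assemble it (the template literal is
+// assumed purely for illustration):
+//
+//  tmpl := new(Template)
+//  tmpl.Bin = []byte(`{"kind": "capsule"}`)
+//  opts := CreateOpts{TemplateOpts: tmpl}
+//  b, err := opts.ToCapsuleCreateMap() // b["template"] carries the raw text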
+func (opts CreateOpts) ToCapsuleCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if err := opts.TemplateOpts.Parse(); err != nil { + return nil, err + } + b["template"] = string(opts.TemplateOpts.Bin) + + return b, nil +} + +// Create implements create capsule request. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToCapsuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{202}}) + return +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the capsule attributes you want to see returned. Marker and Limit are used +// for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + AllProjects bool `q:"all_projects"` +} + +// ToCapsuleListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToCapsuleListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list capsules accessible to you. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToCapsuleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return CapsulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete implements Capsule delete request. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/results.go new file mode 100644 index 000000000000..05d17aa9abe8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/results.go @@ -0,0 +1,323 @@ +package capsules + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a capsule resource. +func (r commonResult) Extract() (*Capsule, error) { + var s *Capsule + err := r.ExtractInto(&s) + return s, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as a Capsule. +type CreateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. 
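+//
+// Call its ExtractErr method to determine whether the request succeeded or
+// failed (sketch; "client" and "capsuleID" are assumed):
+//
+//  err := capsules.Delete(client, capsuleID).ExtractErr()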
+type DeleteResult struct { + gophercloud.ErrResult +} + +type CapsulePage struct { + pagination.LinkedPageBase +} + +// Represents a Capsule +type Capsule struct { + // UUID for the capsule + UUID string `json:"uuid"` + + // User ID for the capsule + UserID string `json:"user_id"` + + // Project ID for the capsule + ProjectID string `json:"project_id"` + + // cpu for the capsule + CPU float64 `json:"cpu"` + + // Memory for the capsule + Memory string `json:"memory"` + + // The name of the capsule + MetaName string `json:"meta_name"` + + // Indicates whether capsule is currently operational. + Status string `json:"status"` + + // Indicates whether capsule is currently operational. + StatusReason string `json:"status_reason"` + + // The created time of the capsule. + CreatedAt time.Time `json:"-"` + + // The updated time of the capsule. + UpdatedAt time.Time `json:"-"` + + // Links includes HTTP references to the itself, useful for passing along to + // other APIs that might want a capsule reference. + Links []interface{} `json:"links"` + + // The capsule version + CapsuleVersion string `json:"capsule_version"` + + // The capsule restart policy + RestartPolicy string `json:"restart_policy"` + + // The capsule metadata labels + MetaLabels map[string]string `json:"meta_labels"` + + // The list of containers uuids inside capsule. + ContainersUUIDs []string `json:"containers_uuids"` + + // The capsule IP addresses + Addresses map[string][]Address `json:"addresses"` + + // The capsule volume attached information + VolumesInfo map[string][]string `json:"volumes_info"` + + // The container object inside capsule + Containers []Container `json:"containers"` + + // The capsule host + Host string `json:"host"` +} + +type Container struct { + // The Container IP addresses + Addresses map[string][]Address `json:"addresses"` + + // UUID for the container + UUID string `json:"uuid"` + + // User ID for the container + UserID string `json:"user_id"` + + // Project ID for the container + ProjectID string `json:"project_id"` + + // cpu for the container + CPU float64 `json:"cpu"` + + // Memory for the container + Memory string `json:"memory"` + + // Image for the container + Image string `json:"image"` + + // The container container + Labels map[string]string `json:"labels"` + + // The created time of the container + CreatedAt time.Time `json:"-"` + + // The updated time of the container + UpdatedAt time.Time `json:"-"` + + // The started time of the container + StartedAt time.Time `json:"-"` + + // Name for the container + Name string `json:"name"` + + // Links includes HTTP references to the itself, useful for passing along to + // other APIs that might want a capsule reference. 
+ Links []interface{} `json:"links"` + + // auto remove flag token for the container + AutoRemove bool `json:"auto_remove"` + + // Host for the container + Host string `json:"host"` + + // Work directory for the container + WorkDir string `json:"workdir"` + + // Disk for the container + Disk int `json:"disk"` + + // Image pull policy for the container + ImagePullPolicy string `json:"image_pull_policy"` + + // Task state for the container + TaskState string `json:"task_state"` + + // Host name for the container + HostName string `json:"hostname"` + + // Environment for the container + Environment map[string]string `json:"environment"` + + // Status for the container + Status string `json:"status"` + + // Auto Heal flag for the container + AutoHeal bool `json:"auto_heal"` + + // Status details for the container + StatusDetail string `json:"status_detail"` + + // Status reason for the container + StatusReason string `json:"status_reason"` + + // Image driver for the container + ImageDriver string `json:"image_driver"` + + // Command for the container + Command []string `json:"command"` + + // Image for the container + Runtime string `json:"runtime"` + + // Interactive flag for the container + Interactive bool `json:"interactive"` + + // Restart Policy for the container + RestartPolicy map[string]string `json:"restart_policy"` + + // Ports information for the container + Ports []int `json:"ports"` + + // Security groups for the container + SecurityGroups []string `json:"security_groups"` +} + +type Address struct { + PreserveOnDelete bool `json:"preserve_on_delete"` + Addr string `json:"addr"` + Port string `json:"port"` + Version float64 `json:"version"` + SubnetID string `json:"subnet_id"` +} + +// NextPageURL is invoked when a paginated collection of capsules has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r CapsulePage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Next, nil +} + +// IsEmpty checks whether a CapsulePage struct is empty. +func (r CapsulePage) IsEmpty() (bool, error) { + is, err := ExtractCapsules(r) + return len(is) == 0, err +} + +// ExtractCapsules accepts a Page struct, specifically a CapsulePage struct, +// and extracts the elements into a slice of Capsule structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractCapsules(r pagination.Page) ([]Capsule, error) { + var s struct { + Capsules []Capsule `json:"capsules"` + } + err := (r.(CapsulePage)).ExtractInto(&s) + return s.Capsules, err +} + +func (r *Capsule) UnmarshalJSON(b []byte) error { + type tmp Capsule + + // Support for "older" zun time formats. + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339ZNoT `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339ZNoT `json:"updated_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = Capsule(s1.tmp) + + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + + return nil + } + + // Support for "new" zun time formats. 
+ var s2 struct { + tmp + CreatedAt gophercloud.JSONRFC3339ZNoTNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339ZNoTNoZ `json:"updated_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = Capsule(s2.tmp) + + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + + return nil +} + +func (r *Container) UnmarshalJSON(b []byte) error { + type tmp Container + + // Support for "older" zun time formats. + var s1 struct { + tmp + CreatedAt gophercloud.JSONRFC3339ZNoT `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339ZNoT `json:"updated_at"` + StartedAt gophercloud.JSONRFC3339ZNoT `json:"started_at"` + } + + err := json.Unmarshal(b, &s1) + if err == nil { + *r = Container(s1.tmp) + + r.CreatedAt = time.Time(s1.CreatedAt) + r.UpdatedAt = time.Time(s1.UpdatedAt) + r.StartedAt = time.Time(s1.StartedAt) + + return nil + } + + // Support for "new" zun time formats. + var s2 struct { + tmp + CreatedAt gophercloud.JSONRFC3339ZNoTNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339ZNoTNoZ `json:"updated_at"` + StartedAt gophercloud.JSONRFC3339ZNoTNoZ `json:"started_at"` + } + + err = json.Unmarshal(b, &s2) + if err != nil { + return err + } + + *r = Container(s2.tmp) + + r.CreatedAt = time.Time(s2.CreatedAt) + r.UpdatedAt = time.Time(s2.UpdatedAt) + r.StartedAt = time.Time(s2.StartedAt) + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/template.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/template.go new file mode 100644 index 000000000000..ad72ca9ff28a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/template.go @@ -0,0 +1,27 @@ +package capsules + +import ( + "encoding/json" + + "gopkg.in/yaml.v2" +) + +// Template is a structure that represents OpenStack Zun Capsule templates +type Template struct { + // Bin stores the contents of the template or environment. + Bin []byte + // Parsed contains a parsed version of Bin. Since there are 2 different + // fields referring to the same value, you must be careful when accessing + // this filed. + Parsed map[string]interface{} +} + +// Parse will parse the contents and then validate. The contents MUST be either JSON or YAML. 
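+//
+// A minimal sketch of feeding it a JSON template (the literal is assumed
+// for illustration):
+//
+//  t := &Template{Bin: []byte(`{"kind": "capsule"}`)}
+//  if err := t.Parse(); err != nil {
+//      // the contents were neither valid JSON nor valid YAML
+//  }
+//  _ = t.Parsed // the decoded map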
+func (t *Template) Parse() error { + if jerr := json.Unmarshal(t.Bin, &t.Parsed); jerr != nil { + if yerr := yaml.Unmarshal(t.Bin, &t.Parsed); yerr != nil { + return ErrInvalidDataFormat{} + } + } + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/fixtures.go new file mode 100644 index 000000000000..18430d5413a1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/fixtures.go @@ -0,0 +1,570 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/container/v1/capsules" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ValidJSONTemplate is a valid OpenStack Capsule template in JSON format +const ValidJSONTemplate = ` +{ + "capsuleVersion": "beta", + "kind": "capsule", + "metadata": { + "labels": { + "app": "web", + "app1": "web1" + }, + "name": "template" + }, + "spec": { + "restartPolicy": "Always", + "containers": [ + { + "command": [ + "/bin/bash" + ], + "env": { + "ENV1": "/usr/local/bin", + "ENV2": "/usr/bin" + }, + "image": "ubuntu", + "imagePullPolicy": "ifnotpresent", + "ports": [ + { + "containerPort": 80, + "hostPort": 80, + "name": "nginx-port", + "protocol": "TCP" + } + ], + "resources": { + "requests": { + "cpu": 1, + "memory": 1024 + } + }, + "workDir": "/root" + } + ] + } +} +` + +// ValidYAMLTemplate is a valid OpenStack Capsule template in YAML format +const ValidYAMLTemplate = ` +capsuleVersion: beta +kind: capsule +metadata: + name: template + labels: + app: web + app1: web1 +spec: + restartPolicy: Always + containers: + - image: ubuntu + command: + - "/bin/bash" + imagePullPolicy: ifnotpresent + workDir: /root + ports: + - name: nginx-port + containerPort: 80 + hostPort: 80 + protocol: TCP + resources: + requests: + cpu: 1 + memory: 1024 + env: + ENV1: /usr/local/bin + ENV2: /usr/bin +` + +// ValidJSONTemplateParsed is the expected parsed version of ValidJSONTemplate +var ValidJSONTemplateParsed = map[string]interface{}{ + "capsuleVersion": "beta", + "kind": "capsule", + "metadata": map[string]interface{}{ + "name": "template", + "labels": map[string]string{ + "app": "web", + "app1": "web1", + }, + }, + "spec": map[string]interface{}{ + "restartPolicy": "Always", + "containers": []map[string]interface{}{ + map[string]interface{}{ + "image": "ubuntu", + "command": []interface{}{ + "/bin/bash", + }, + "imagePullPolicy": "ifnotpresent", + "workDir": "/root", + "ports": []interface{}{ + map[string]interface{}{ + "name": "nginx-port", + "containerPort": float64(80), + "hostPort": float64(80), + "protocol": "TCP", + }, + }, + "resources": map[string]interface{}{ + "requests": map[string]interface{}{ + "cpu": float64(1), + "memory": float64(1024), + }, + }, + "env": map[string]interface{}{ + "ENV1": "/usr/local/bin", + "ENV2": "/usr/bin", + }, + }, + }, + }, +} + +// ValidYAMLTemplateParsed is the expected parsed version of ValidYAMLTemplate +var 
ValidYAMLTemplateParsed = map[string]interface{}{ + "capsuleVersion": "beta", + "kind": "capsule", + "metadata": map[string]interface{}{ + "name": "template", + "labels": map[string]string{ + "app": "web", + "app1": "web1", + }, + }, + "spec": map[interface{}]interface{}{ + "restartPolicy": "Always", + "containers": []map[interface{}]interface{}{ + map[interface{}]interface{}{ + "image": "ubuntu", + "command": []interface{}{ + "/bin/bash", + }, + "imagePullPolicy": "ifnotpresent", + "workDir": "/root", + "ports": []interface{}{ + map[interface{}]interface{}{ + "name": "nginx-port", + "containerPort": 80, + "hostPort": 80, + "protocol": "TCP", + }, + }, + "resources": map[interface{}]interface{}{ + "requests": map[interface{}]interface{}{ + "cpu": 1, + "memory": 1024, + }, + }, + "env": map[interface{}]interface{}{ + "ENV1": "/usr/local/bin", + "ENV2": "/usr/bin", + }, + }, + }, + }, +} + +const CapsuleGetBody_OldTime = ` +{ + "uuid": "cc654059-1a77-47a3-bfcf-715bde5aad9e", + "status": "Running", + "user_id": "d33b18c384574fd2a3299447aac285f0", + "project_id": "6b8ffef2a0ac42ee87887b9cc98bdf68", + "cpu": 1, + "memory": "1024M", + "meta_name": "test", + "meta_labels": {"web": "app"}, + "created_at": "2018-01-12 09:37:25+00:00", + "updated_at": "2018-01-12 09:37:26+00:00", + "links": [ + { + "href": "http://10.10.10.10/v1/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "self" + }, + { + "href": "http://10.10.10.10/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "bookmark" + } + ], + "capsule_version": "beta", + "restart_policy": "always", + "containers_uuids": ["1739e28a-d391-4fd9-93a5-3ba3f29a4c9b"], + "addresses": { + "b1295212-64e1-471d-aa01-25ff46f9818d": [ + { + "version": 4, + "preserve_on_delete": false, + "addr": "172.24.4.11", + "port": "8439060f-381a-4386-a518-33d5a4058636", + "subnet_id": "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a" + } + ] + }, + "volumes_info": { + "67618d54-dd55-4f7e-91b3-39ffb3ba7f5f": [ + "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b" + ] + }, + "host": "test-host", + "status_reason": "No reason", + "containers": [ + { + "addresses": { + "b1295212-64e1-471d-aa01-25ff46f9818d": [ + { + "version": 4, + "preserve_on_delete": false, + "addr": "172.24.4.11", + "port": "8439060f-381a-4386-a518-33d5a4058636", + "subnet_id": "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a" + } + ] + }, + "image": "test", + "labels": {"foo": "bar"}, + "created_at": "2018-01-12 09:37:25+00:00", + "updated_at": "2018-01-12 09:37:26+00:00", + "started_at": "2018-01-12 09:37:26+00:00", + "workdir": "/root", + "disk": 0, + "security_groups": ["default"], + "image_pull_policy": "ifnotpresent", + "task_state": "Creating", + "user_id": "d33b18c384574fd2a3299447aac285f0", + "project_id": "6b8ffef2a0ac42ee87887b9cc98bdf68", + "uuid": "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b", + "hostname": "test-hostname", + "environment": {"USER1": "test"}, + "memory": "1024M", + "status": "Running", + "auto_remove": false, + "auto_heal": false, + "host": "test-host", + "image_driver": "docker", + "status_detail": "Just created", + "status_reason": "No reason", + "name": "test-demo-omicron-13", + "restart_policy": { + "MaximumRetryCount": "0", + "Name": "always" + }, + "ports": [80], + "command": ["testcmd"], + "runtime": "runc", + "cpu": 1, + "interactive": true + } + ] +}` + +const CapsuleGetBody_NewTime = ` +{ + "uuid": "cc654059-1a77-47a3-bfcf-715bde5aad9e", + "status": "Running", + "user_id": "d33b18c384574fd2a3299447aac285f0", + "project_id": "6b8ffef2a0ac42ee87887b9cc98bdf68", + "cpu": 1, + "memory": "1024M", + 
"meta_name": "test", + "meta_labels": {"web": "app"}, + "created_at": "2018-01-12 09:37:25", + "updated_at": "2018-01-12 09:37:26", + "links": [ + { + "href": "http://10.10.10.10/v1/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "self" + }, + { + "href": "http://10.10.10.10/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "bookmark" + } + ], + "capsule_version": "beta", + "restart_policy": "always", + "containers_uuids": ["1739e28a-d391-4fd9-93a5-3ba3f29a4c9b"], + "addresses": { + "b1295212-64e1-471d-aa01-25ff46f9818d": [ + { + "version": 4, + "preserve_on_delete": false, + "addr": "172.24.4.11", + "port": "8439060f-381a-4386-a518-33d5a4058636", + "subnet_id": "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a" + } + ] + }, + "volumes_info": { + "67618d54-dd55-4f7e-91b3-39ffb3ba7f5f": [ + "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b" + ] + }, + "host": "test-host", + "status_reason": "No reason", + "containers": [ + { + "addresses": { + "b1295212-64e1-471d-aa01-25ff46f9818d": [ + { + "version": 4, + "preserve_on_delete": false, + "addr": "172.24.4.11", + "port": "8439060f-381a-4386-a518-33d5a4058636", + "subnet_id": "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a" + } + ] + }, + "image": "test", + "labels": {"foo": "bar"}, + "created_at": "2018-01-12 09:37:25", + "updated_at": "2018-01-12 09:37:26", + "started_at": "2018-01-12 09:37:26", + "workdir": "/root", + "disk": 0, + "security_groups": ["default"], + "image_pull_policy": "ifnotpresent", + "task_state": "Creating", + "user_id": "d33b18c384574fd2a3299447aac285f0", + "project_id": "6b8ffef2a0ac42ee87887b9cc98bdf68", + "uuid": "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b", + "hostname": "test-hostname", + "environment": {"USER1": "test"}, + "memory": "1024M", + "status": "Running", + "auto_remove": false, + "auto_heal": false, + "host": "test-host", + "image_driver": "docker", + "status_detail": "Just created", + "status_reason": "No reason", + "name": "test-demo-omicron-13", + "restart_policy": { + "MaximumRetryCount": "0", + "Name": "always" + }, + "ports": [80], + "command": ["testcmd"], + "runtime": "runc", + "cpu": 1, + "interactive": true + } + ] +}` + +const CapsuleListBody = ` +{ + "capsules": [ + { + "uuid": "cc654059-1a77-47a3-bfcf-715bde5aad9e", + "status": "Running", + "user_id": "d33b18c384574fd2a3299447aac285f0", + "project_id": "6b8ffef2a0ac42ee87887b9cc98bdf68", + "cpu": 1, + "memory": "1024M", + "meta_name": "test", + "meta_labels": {"web": "app"}, + "created_at": "2018-01-12 09:37:25+00:00", + "updated_at": "2018-01-12 09:37:25+01:00", + "links": [ + { + "href": "http://10.10.10.10/v1/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "self" + }, + { + "href": "http://10.10.10.10/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "bookmark" + } + ], + "capsule_version": "beta", + "restart_policy": "always", + "containers_uuids": ["1739e28a-d391-4fd9-93a5-3ba3f29a4c9b", "d1469e8d-bcbc-43fc-b163-8b9b6a740930"], + "addresses": { + "b1295212-64e1-471d-aa01-25ff46f9818d": [ + { + "version": 4, + "preserve_on_delete": false, + "addr": "172.24.4.11", + "port": "8439060f-381a-4386-a518-33d5a4058636", + "subnet_id": "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a" + } + ] + }, + "volumes_info": { + "67618d54-dd55-4f7e-91b3-39ffb3ba7f5f": [ + "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b" + ] + }, + "host": "test-host", + "status_reason": "No reason" + } + ] +}` + +var ExpectedContainer1 = capsules.Container{ + Name: "test-demo-omicron-13", + UUID: "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b", + UserID: "d33b18c384574fd2a3299447aac285f0", + ProjectID: 
"6b8ffef2a0ac42ee87887b9cc98bdf68", + CPU: float64(1), + Memory: "1024M", + Host: "test-host", + Status: "Running", + Image: "test", + Labels: map[string]string{ + "foo": "bar", + }, + WorkDir: "/root", + Disk: 0, + Command: []string{ + "testcmd", + }, + Ports: []int{ + 80, + }, + SecurityGroups: []string{ + "default", + }, + ImagePullPolicy: "ifnotpresent", + Runtime: "runc", + TaskState: "Creating", + HostName: "test-hostname", + Environment: map[string]string{ + "USER1": "test", + }, + StatusReason: "No reason", + StatusDetail: "Just created", + ImageDriver: "docker", + Interactive: true, + AutoRemove: false, + AutoHeal: false, + RestartPolicy: map[string]string{ + "MaximumRetryCount": "0", + "Name": "always", + }, + Addresses: map[string][]capsules.Address{ + "b1295212-64e1-471d-aa01-25ff46f9818d": []capsules.Address{ + { + PreserveOnDelete: false, + Addr: "172.24.4.11", + Port: "8439060f-381a-4386-a518-33d5a4058636", + Version: float64(4), + SubnetID: "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a", + }, + }, + }, +} + +var ExpectedCapsule = capsules.Capsule{ + UUID: "cc654059-1a77-47a3-bfcf-715bde5aad9e", + Status: "Running", + UserID: "d33b18c384574fd2a3299447aac285f0", + ProjectID: "6b8ffef2a0ac42ee87887b9cc98bdf68", + CPU: float64(1), + Memory: "1024M", + MetaName: "test", + Links: []interface{}{ + map[string]interface{}{ + "href": "http://10.10.10.10/v1/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "self", + }, + map[string]interface{}{ + "href": "http://10.10.10.10/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", + "rel": "bookmark", + }, + }, + CapsuleVersion: "beta", + RestartPolicy: "always", + MetaLabels: map[string]string{ + "web": "app", + }, + ContainersUUIDs: []string{ + "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b", + }, + Addresses: map[string][]capsules.Address{ + "b1295212-64e1-471d-aa01-25ff46f9818d": []capsules.Address{ + { + PreserveOnDelete: false, + Addr: "172.24.4.11", + Port: "8439060f-381a-4386-a518-33d5a4058636", + Version: float64(4), + SubnetID: "4a2bcd64-93ad-4436-9f48-3a7f9b267e0a", + }, + }, + }, + VolumesInfo: map[string][]string{ + "67618d54-dd55-4f7e-91b3-39ffb3ba7f5f": []string{ + "1739e28a-d391-4fd9-93a5-3ba3f29a4c9b", + }, + }, + Host: "test-host", + StatusReason: "No reason", + Containers: []capsules.Container{ + ExpectedContainer1, + }, +} + +// HandleCapsuleGetOldTimeSuccessfully test setup +func HandleCapsuleGetOldTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CapsuleGetBody_OldTime) + }) +} + +// HandleCapsuleGetNewTimeSuccessfully test setup +func HandleCapsuleGetNewTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/capsules/cc654059-1a77-47a3-bfcf-715bde5aad9e", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CapsuleGetBody_NewTime) + }) +} + +// HandleCapsuleCreateSuccessfully creates an HTTP handler at `/capsules` on the test handler mux +// that responds with a `Create` response. 
+func HandleCapsuleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/capsules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, CapsuleGetBody_NewTime) + }) +} + +// HandleCapsuleListSuccessfully test setup +func HandleCapsuleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/capsules/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, CapsuleListBody) + }) +} + +func HandleCapsuleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/capsules/963a239d-3946-452b-be5a-055eab65a421", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/requests_test.go new file mode 100644 index 000000000000..03b93cfe0f7c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/requests_test.go @@ -0,0 +1,119 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/container/v1/capsules" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetCapsule_OldTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCapsuleGetOldTimeSuccessfully(t) + + createdAt, _ := time.Parse(gophercloud.RFC3339ZNoT, "2018-01-12 09:37:25+00:00") + updatedAt, _ := time.Parse(gophercloud.RFC3339ZNoT, "2018-01-12 09:37:26+00:00") + startedAt, _ := time.Parse(gophercloud.RFC3339ZNoT, "2018-01-12 09:37:26+00:00") + + ExpectedCapsule.CreatedAt = createdAt + ExpectedCapsule.UpdatedAt = updatedAt + ExpectedCapsule.Containers[0].CreatedAt = createdAt + ExpectedCapsule.Containers[0].UpdatedAt = updatedAt + ExpectedCapsule.Containers[0].StartedAt = startedAt + + actualCapsule, err := capsules.Get(fakeclient.ServiceClient(), ExpectedCapsule.UUID).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &ExpectedCapsule, actualCapsule) +} + +func TestGetCapsule_NewTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCapsuleGetNewTimeSuccessfully(t) + + createdAt, _ := time.Parse(gophercloud.RFC3339ZNoTNoZ, "2018-01-12 09:37:25") + updatedAt, _ := time.Parse(gophercloud.RFC3339ZNoTNoZ, "2018-01-12 09:37:26") + startedAt, _ := time.Parse(gophercloud.RFC3339ZNoTNoZ, "2018-01-12 09:37:26") + + ExpectedCapsule.CreatedAt = createdAt + ExpectedCapsule.UpdatedAt = updatedAt + ExpectedCapsule.Containers[0].CreatedAt = createdAt + ExpectedCapsule.Containers[0].UpdatedAt = updatedAt + ExpectedCapsule.Containers[0].StartedAt = startedAt + + actualCapsule, err := capsules.Get(fakeclient.ServiceClient(), ExpectedCapsule.UUID).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &ExpectedCapsule, actualCapsule) +} + +func TestCreateCapsule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCapsuleCreateSuccessfully(t) + + 
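+	// Build the create request from the ValidJSONTemplate fixture; the fake
+	// handler registered above replies with CapsuleGetBody_NewTime.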
template := new(capsules.Template) + template.Bin = []byte(ValidJSONTemplate) + + createOpts := capsules.CreateOpts{ + TemplateOpts: template, + } + actualCapsule, err := capsules.Create(fakeclient.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &ExpectedCapsule, actualCapsule) +} + +func TestListCapsule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCapsuleListSuccessfully(t) + + createdAt, _ := time.Parse(gophercloud.RFC3339ZNoT, "2018-01-12 09:37:25+00:00") + updatedAt, _ := time.Parse(gophercloud.RFC3339ZNoT, "2018-01-12 09:37:25+01:00") + + ExpectedCapsule.CreatedAt = createdAt + ExpectedCapsule.UpdatedAt = updatedAt + ExpectedCapsule.Containers = nil + + expected := []capsules.Capsule{ExpectedCapsule} + + count := 0 + results := capsules.List(fakeclient.ServiceClient(), nil) + err := results.EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := capsules.ExtractCapsules(page) + if err != nil { + t.Errorf("Failed to extract capsules: %v", err) + return false, err + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCapsuleDeleteSuccessfully(t) + + res := capsules.Delete(fakeclient.ServiceClient(), "963a239d-3946-452b-be5a-055eab65a421") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/template_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/template_test.go new file mode 100644 index 000000000000..62d0de8f5dea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/testing/template_test.go @@ -0,0 +1,29 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/container/v1/capsules" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTemplateParsing(t *testing.T) { + templateJSON := new(capsules.Template) + templateJSON.Bin = []byte(ValidJSONTemplate) + err := templateJSON.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONTemplateParsed, templateJSON.Parsed) + + templateYAML := new(capsules.Template) + templateYAML.Bin = []byte(ValidYAMLTemplate) + err = templateYAML.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidYAMLTemplateParsed, templateYAML.Parsed) + + templateInvalid := new(capsules.Template) + templateInvalid.Bin = []byte("Keep Austin Weird") + err = templateInvalid.Parse() + if err == nil { + t.Error("Template parsing did not catch invalid template") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/urls.go new file mode 100644 index 000000000000..575fb2a712a9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/container/v1/capsules/urls.go @@ -0,0 +1,21 @@ +package capsules + +import "github.com/gophercloud/gophercloud" + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("capsules", id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("capsules") +} + +// `listURL` is a pure function. `listURL(c)` is a URL for which a GET +// request will respond with a list of capsules in the service `c`. 
+func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("capsules") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("capsules", id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/doc.go new file mode 100644 index 000000000000..b5c39f7e6689 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/doc.go @@ -0,0 +1,33 @@ +// Package certificates contains functionality for working with Magnum Certificate +// resources. +/* +Package certificates provides information and interaction with the certificates through +the OpenStack Container Infra service. + +Example to get certificates + + certificate, err := certificates.Get(serviceClient, "d564b18a-2890-4152-be3d-e05d784ff72").Extract() + if err != nil { + panic(err) + } + +Example to create certificates + + opts := certificates.CreateOpts{ + BayUUID: "d564b18a-2890-4152-be3d-e05d784ff727", + CSR: "-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UEAxMJWW91ciBOYW1lMIICIjANBgkqhkiG9w0B\n-----END CERTIFICATE REQUEST-----\n", + } + + response, err := certificates.Create(sc, opts).Extract() + if err != nil { + panic(err) + } + +Example to update certificates + + err := certificates.Update(client, clusterUUID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package certificates diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/requests.go new file mode 100644 index 000000000000..011546b68326 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/requests.go @@ -0,0 +1,65 @@ +package certificates + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" +) + +// CreateOptsBuilder allows extensions to add additional parameters +// to the Create request. +type CreateOptsBuilder interface { + ToCertificateCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a certificate. +type CreateOpts struct { + ClusterUUID string `json:"cluster_uuid,omitempty" xor:"BayUUID"` + BayUUID string `json:"bay_uuid,omitempty" xor:"ClusterUUID"` + CSR string `json:"csr" required:"true"` +} + +// ToCertificateCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToCertificateCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Get makes a request against the API to get details for a certificate. +func Get(client *gophercloud.ServiceClient, clusterID string) (r GetResult) { + url := getURL(client, clusterID) + + _, r.Err = client.Get(url, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + + return +} + +// Create requests the creation of a new certificate. 
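+//
+// A short illustrative sketch (the variable names are assumptions, not taken
+// from this package):
+//
+//	opts := CreateOpts{
+//		ClusterUUID: clusterID,
+//		CSR:         csrPEM,
+//	}
+//	cert, err := Create(client, opts).Extract()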
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToCertificateCreateMap() + if err != nil { + r.Err = err + return + } + + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Update will rotate the CA certificate for a cluster +func Update(client *gophercloud.ServiceClient, clusterID string) (r UpdateResult) { + _, r.Err = client.Patch(updateURL(client, clusterID), nil, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/results.go new file mode 100644 index 000000000000..654e585fd420 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/results.go @@ -0,0 +1,40 @@ +package certificates + +import ( + "github.com/gophercloud/gophercloud" +) + +type commonResult struct { + gophercloud.Result +} + +// GetResult is the response of a Get operations. +type GetResult struct { + commonResult +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + commonResult +} + +// UpdateResult is the response of an Update operations. +type UpdateResult struct { + gophercloud.ErrResult +} + +// Extract is a function that accepts a result and extracts a certificate resource. +func (r commonResult) Extract() (*Certificate, error) { + var s *Certificate + err := r.ExtractInto(&s) + return s, err +} + +// Represents a Certificate +type Certificate struct { + ClusterUUID string `json:"cluster_uuid"` + BayUUID string `json:"bay_uuid"` + Links []gophercloud.Link `json:"links"` + PEM string `json:"pem"` + CSR string `json:"csr"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/fixtures.go new file mode 100644 index 000000000000..0634b7e25d52 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/fixtures.go @@ -0,0 +1,111 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const CertificateResponse = ` +{ + "cluster_uuid": "d564b18a-2890-4152-be3d-e05d784ff727", + "bay_uuid": "d564b18a-2890-4152-be3d-e05d784ff727", + "pem": "FAKE_CERTIFICATE", + "links": [ + { + "href": "http://10.63.176.154:9511/v1/certificates/d564b18a-2890-4152-be3d-e05d784ff727", + "rel": "self" + }, + { + "href": "http://10.63.176.154:9511/certificates/d564b18a-2890-4152-be3d-e05d784ff727", + "rel": "bookmark" + } + ] +}` + +const CreateCertificateResponse = ` +{ 
+ "cluster_uuid": "d564b18a-2890-4152-be3d-e05d784ff727", + "bay_uuid": "d564b18a-2890-4152-be3d-e05d784ff727", + "pem": "FAKE_CERTIFICATE_PEM", + "csr": "FAKE_CERTIFICATE_CSR", + "links": [ + { + "href": "http://10.63.176.154:9511/v1/certificates/d564b18a-2890-4152-be3d-e05d784ff727", + "rel": "self" + }, + { + "href": "http://10.63.176.154:9511/certificates/d564b18a-2890-4152-be3d-e05d784ff727", + "rel": "bookmark" + } + ] +}` + +var ExpectedCertificate = certificates.Certificate{ + ClusterUUID: "d564b18a-2890-4152-be3d-e05d784ff727", + BayUUID: "d564b18a-2890-4152-be3d-e05d784ff727", + PEM: "FAKE_CERTIFICATE", + Links: []gophercloud.Link{ + {Href: "http://10.63.176.154:9511/v1/certificates/d564b18a-2890-4152-be3d-e05d784ff727", Rel: "self"}, + {Href: "http://10.63.176.154:9511/certificates/d564b18a-2890-4152-be3d-e05d784ff727", Rel: "bookmark"}, + }, +} + +var ExpectedCreateCertificateResponse = certificates.Certificate{ + ClusterUUID: "d564b18a-2890-4152-be3d-e05d784ff727", + BayUUID: "d564b18a-2890-4152-be3d-e05d784ff727", + PEM: "FAKE_CERTIFICATE_PEM", + CSR: "FAKE_CERTIFICATE_CSR", + Links: []gophercloud.Link{ + {Href: "http://10.63.176.154:9511/v1/certificates/d564b18a-2890-4152-be3d-e05d784ff727", Rel: "self"}, + {Href: "http://10.63.176.154:9511/certificates/d564b18a-2890-4152-be3d-e05d784ff727", Rel: "bookmark"}, + }, +} + +func HandleGetCertificateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/certificates/d564b18a-2890-4152-be3d-e05d784ff72", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("OpenStack-API-Minimum-Version", "container-infra 1.1") + w.Header().Add("OpenStack-API-Maximum-Version", "container-infra 1.6") + w.Header().Add("OpenStack-API-Version", "container-infra 1.1") + w.Header().Add("X-OpenStack-Request-Id", "req-781e9bdc-4163-46eb-91c9-786c53188bbb") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, CertificateResponse) + }) +} + +func HandleCreateCertificateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/certificates/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("OpenStack-API-Minimum-Version", "container-infra 1.1") + w.Header().Add("OpenStack-API-Maximum-Version", "container-infra 1.6") + w.Header().Add("OpenStack-API-Version", "container-infra 1.1") + w.Header().Add("X-OpenStack-Request-Id", "req-781e9bdc-4163-46eb-91c9-786c53188bbb") + w.WriteHeader(http.StatusCreated) + + fmt.Fprint(w, CreateCertificateResponse) + }) +} + +func HandleUpdateCertificateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/certificates/d564b18a-2890-4152-be3d-e05d784ff72", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, `{}`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/requests_test.go new file mode 100644 index 000000000000..7ad95e9cc983 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/testing/requests_test.go @@ -0,0 +1,55 @@ +package testing + +import ( + "testing" + + 
"github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetCertificates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetCertificateSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + + actual, err := certificates.Get(sc, "d564b18a-2890-4152-be3d-e05d784ff72").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCertificate, *actual) +} + +func TestCreateCertificates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateCertificateSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + + opts := certificates.CreateOpts{ + BayUUID: "d564b18a-2890-4152-be3d-e05d784ff727", + CSR: "FAKE_CERTIFICATE_CSR", + } + + actual, err := certificates.Create(sc, opts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCreateCertificateResponse, *actual) +} + +func TestUpdateCertificates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateCertificateSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + + err := certificates.Update(sc, "d564b18a-2890-4152-be3d-e05d784ff72").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/urls.go new file mode 100644 index 000000000000..87c831d88eaf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/certificates/urls.go @@ -0,0 +1,23 @@ +package certificates + +import ( + "github.com/gophercloud/gophercloud" +) + +var apiName = "certificates" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiName) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiName, id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiName, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/doc.go new file mode 100644 index 000000000000..2a0a2bac5ce0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/doc.go @@ -0,0 +1,57 @@ +/* +Package clusters contains functionality for working with Magnum Cluster resources. 
+ +Example to Create a Cluster + + masterCount := 1 + nodeCount := 1 + createTimeout := 30 + opts := clusters.CreateOpts{ + ClusterTemplateID: "0562d357-8641-4759-8fed-8173f02c9633", + CreateTimeout: &createTimeout, + DiscoveryURL: "", + FlavorID: "m1.small", + KeyPair: "my_keypair", + Labels: map[string]string{}, + MasterCount: &masterCount, + MasterFlavorID: "m1.small", + Name: "k8s", + NodeCount: &nodeCount, + } + + cluster, err := clusters.Create(serviceClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Get a Cluster + + clusterName := "cluster123" + cluster, err := clusters.Get(serviceClient, clusterName).Extract() + if err != nil { + panic(err) + } + fmt.Printf("%+v\n", cluster) + +Example to List Clusters + + listOpts := clusters.ListOpts{ + Limit: 20, + } + + allPages, err := clusters.List(serviceClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allClusters, err := clusters.ExtractClusters(allPages) + if err != nil { + panic(err) + } + + for _, cluster := range allClusters { + fmt.Printf("%+v\n", cluster) + } + +*/ +package clusters diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/requests.go new file mode 100644 index 000000000000..b1b4ec7ec923 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/requests.go @@ -0,0 +1,102 @@ +package clusters + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder Builder. +type CreateOptsBuilder interface { + ToClusterCreateMap() (map[string]interface{}, error) +} + +// CreateOpts params +type CreateOpts struct { + ClusterTemplateID string `json:"cluster_template_id" required:"true"` + CreateTimeout *int `json:"create_timeout"` + DiscoveryURL string `json:"discovery_url,omitempty"` + DockerVolumeSize *int `json:"docker_volume_size,omitempty"` + FlavorID string `json:"flavor_id,omitempty"` + Keypair string `json:"keypair,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + MasterCount *int `json:"master_count,omitempty"` + MasterFlavorID string `json:"master_flavor_id,omitempty"` + Name string `json:"name"` + NodeCount *int `json:"node_count,omitempty"` +} + +// ToClusterCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToClusterCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create requests the creation of a new cluster. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToClusterCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Get retrieves a specific clusters based on its unique ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + var result *http.Response + result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + if r.Err == nil { + r.Header = result.Header + } + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. 
+type ListOptsBuilder interface { + ToClustersListQuery() (string, error) +} + +// ListOpts allows the sorting of paginated collections through +// the API. SortKey allows you to sort by a particular cluster attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToClustersListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToClustersListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// clusters. It accepts a ListOptsBuilder, which allows you to sort +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToClustersListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ClusterPage{pagination.LinkedPageBase{PageResult: r}} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/results.go new file mode 100644 index 000000000000..11e648227a29 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/results.go @@ -0,0 +1,95 @@ +package clusters + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. +type GetResult struct { + commonResult +} + +func (r CreateResult) Extract() (clusterID string, err error) { + var s struct { + UUID string + } + err = r.ExtractInto(&s) + return s.UUID, err +} + +// Extract is a function that accepts a result and extracts a cluster resource. 
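+//
+// For example (sketch; "client" stands for a configured service client):
+//
+//	cluster, err := Get(client, clusterID).Extract()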
+func (r commonResult) Extract() (*Cluster, error) { + var s *Cluster + err := r.ExtractInto(&s) + return s, err +} + +type Cluster struct { + APIAddress string `json:"api_address"` + COEVersion string `json:"coe_version"` + ClusterTemplateID string `json:"cluster_template_id"` + ContainerVersion string `json:"container_version"` + CreateTimeout int `json:"create_timeout"` + CreatedAt time.Time `json:"created_at"` + DiscoveryURL string `json:"discovery_url"` + DockerVolumeSize int `json:"docker_volume_size"` + Faults map[string]string `json:"faults"` + FlavorID string `json:"flavor_id"` + KeyPair string `json:"keypair"` + Labels map[string]string `json:"labels"` + Links []gophercloud.Link `json:"links"` + MasterFlavorID string `json:"master_flavor_id"` + MasterAddresses []string `json:"master_addresses"` + MasterCount int `json:"master_count"` + Name string `json:"name"` + NodeAddresses []string `json:"node_addresses"` + NodeCount int `json:"node_count"` + ProjectID string `json:"project_id"` + StackID string `json:"stack_id"` + Status string `json:"status"` + StatusReason string `json:"status_reason"` + UUID string `json:"uuid"` + UpdatedAt time.Time `json:"updated_at"` + UserID string `json:"user_id"` +} + +type ClusterPage struct { + pagination.LinkedPageBase +} + +func (r ClusterPage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Next, nil +} + +// IsEmpty checks whether a ClusterPage struct is empty. +func (r ClusterPage) IsEmpty() (bool, error) { + is, err := ExtractClusters(r) + return len(is) == 0, err +} + +func ExtractClusters(r pagination.Page) ([]Cluster, error) { + var s struct { + Clusters []Cluster `json:"clusters"` + } + err := (r.(ClusterPage)).ExtractInto(&s) + return s.Clusters, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/fixtures.go new file mode 100644 index 000000000000..357b404d588f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/fixtures.go @@ -0,0 +1,231 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const clusterUUID = "746e779a-751a-456b-a3e9-c883d734946f" +const clusterUUID2 = "846e779a-751a-456b-a3e9-c883d734946f" +const requestUUID = "req-781e9bdc-4163-46eb-91c9-786c53188bbb" + +var ClusterCreateResponse = fmt.Sprintf(` + { + "uuid":"%s" + }`, clusterUUID) + +var ExpectedCluster = clusters.Cluster{ + APIAddress: "https://172.24.4.6:6443", + COEVersion: "v1.2.0", + ClusterTemplateID: "0562d357-8641-4759-8fed-8173f02c9633", + CreateTimeout: 60, + CreatedAt: time.Date(2016, 8, 29, 6, 51, 31, 0, time.UTC), + DiscoveryURL: 
"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", + Links: []gophercloud.Link{ + { + Href: "http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", + Rel: "self", + }, + { + Href: "http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", + Rel: "bookmark", + }, + }, + KeyPair: "my-keypair", + MasterAddresses: []string{"172.24.4.6"}, + MasterCount: 1, + Name: "k8s", + NodeAddresses: []string{"172.24.4.13"}, + NodeCount: 1, + StackID: "9c6f1169-7300-4d08-a444-d2be38758719", + Status: "CREATE_COMPLETE", + StatusReason: "Stack CREATE completed successfully", + UpdatedAt: time.Date(2016, 8, 29, 6, 53, 24, 0, time.UTC), + UUID: clusterUUID, +} + +var ExpectedCluster2 = clusters.Cluster{ + APIAddress: "https://172.24.4.6:6443", + COEVersion: "v1.2.0", + ClusterTemplateID: "0562d357-8641-4759-8fed-8173f02c9633", + CreateTimeout: 60, + CreatedAt: time.Time{}, + DiscoveryURL: "https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", + Links: []gophercloud.Link{ + { + Href: "http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", + Rel: "self", + }, + { + Href: "http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", + Rel: "bookmark", + }, + }, + KeyPair: "my-keypair", + MasterAddresses: []string{"172.24.4.6"}, + MasterCount: 1, + Name: "k8s", + NodeAddresses: []string{"172.24.4.13"}, + NodeCount: 1, + StackID: "9c6f1169-7300-4d08-a444-d2be38758719", + Status: "CREATE_COMPLETE", + StatusReason: "Stack CREATE completed successfully", + UpdatedAt: time.Date(2016, 8, 29, 6, 53, 24, 0, time.UTC), + UUID: clusterUUID2, +} + +var ExpectedClusterUUID = clusterUUID + +func HandleCreateClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-Id", requestUUID) + w.WriteHeader(http.StatusAccepted) + + fmt.Fprint(w, ClusterCreateResponse) + }) +} + +func HandleGetClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters/"+clusterUUID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterGetResponse) + }) +} + +var ClusterGetResponse = fmt.Sprintf(` +{ + "status":"CREATE_COMPLETE", + "uuid":"%s", + "links":[ + { + "href":"http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", + "rel":"self" + }, + { + "href":"http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", + "rel":"bookmark" + } + ], + "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", + "created_at":"2016-08-29T06:51:31+00:00", + "api_address":"https://172.24.4.6:6443", + "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", + "updated_at":"2016-08-29T06:53:24+00:00", + "master_count":1, + "coe_version": "v1.2.0", + "keypair":"my-keypair", + "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", + "master_addresses":[ + "172.24.4.6" + ], + "node_count":1, + "node_addresses":[ + "172.24.4.13" + ], + "status_reason":"Stack CREATE completed successfully", + "create_timeout":60, + "name":"k8s" +}`, clusterUUID) + +var ClusterListResponse = fmt.Sprintf(` +{ + "clusters": [ + { + "api_address":"https://172.24.4.6:6443", + 
"cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", + "coe_version": "v1.2.0", + "create_timeout":60, + "created_at":"2016-08-29T06:51:31+00:00", + "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", + "keypair":"my-keypair", + "links":[ + { + "href":"http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", + "rel":"self" + }, + { + "href":"http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", + "rel":"bookmark" + } + ], + "master_addresses":[ + "172.24.4.6" + ], + "master_count":1, + "name":"k8s", + "node_addresses":[ + "172.24.4.13" + ], + "node_count":1, + "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", + "status":"CREATE_COMPLETE", + "status_reason":"Stack CREATE completed successfully", + "updated_at":"2016-08-29T06:53:24+00:00", + "uuid":"%s" + }, + { + "api_address":"https://172.24.4.6:6443", + "cluster_template_id":"0562d357-8641-4759-8fed-8173f02c9633", + "coe_version": "v1.2.0", + "create_timeout":60, + "created_at":null, + "discovery_url":"https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", + "keypair":"my-keypair", + "links":[ + { + "href":"http://10.164.180.104:9511/v1/clusters/746e779a-751a-456b-a3e9-c883d734946f", + "rel":"self" + }, + { + "href":"http://10.164.180.104:9511/clusters/746e779a-751a-456b-a3e9-c883d734946f", + "rel":"bookmark" + } + ], + "master_addresses":[ + "172.24.4.6" + ], + "master_count":1, + "name":"k8s", + "node_addresses":[ + "172.24.4.13" + ], + "node_count":1, + "stack_id":"9c6f1169-7300-4d08-a444-d2be38758719", + "status":"CREATE_COMPLETE", + "status_reason":"Stack CREATE completed successfully", + "updated_at":null, + "uuid":"%s" + } + ] +}`, clusterUUID, clusterUUID2) + +var ExpectedClusters = []clusters.Cluster{ExpectedCluster} + +func HandleListClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clusters", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("X-OpenStack-Request-Id", requestUUID) + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterListResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/requests_test.go new file mode 100644 index 000000000000..2fd41ef19cc2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/testing/requests_test.go @@ -0,0 +1,88 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateClusterSuccessfully(t) + + masterCount := 1 + nodeCount := 1 + createTimeout := 30 + opts := clusters.CreateOpts{ + ClusterTemplateID: "0562d357-8641-4759-8fed-8173f02c9633", + CreateTimeout: &createTimeout, + DiscoveryURL: "", + FlavorID: "m1.small", + Keypair: "my_keypair", + Labels: map[string]string{}, + MasterCount: &masterCount, + MasterFlavorID: "m1.small", + Name: "k8s", + NodeCount: &nodeCount, + } + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + res := clusters.Create(sc, opts) + th.AssertNoErr(t, res.Err) + + 
requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, requestUUID, requestID) + + actual, err := res.Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, clusterUUID, actual) +} + +func TestGetCluster(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetClusterSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + actual, err := clusters.Get(sc, "746e779a-751a-456b-a3e9-c883d734946f").Extract() + th.AssertNoErr(t, err) + actual.CreatedAt = actual.CreatedAt.UTC() + actual.UpdatedAt = actual.UpdatedAt.UTC() + th.AssertDeepEquals(t, ExpectedCluster, *actual) +} + +func TestListClusters(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListClusterSuccessfully(t) + + count := 0 + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + clusters.List(sc, clusters.ListOpts{Limit: 2}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := clusters.ExtractClusters(page) + th.AssertNoErr(t, err) + for idx := range actual { + actual[idx].CreatedAt = actual[idx].CreatedAt.UTC() + actual[idx].UpdatedAt = actual[idx].UpdatedAt.UTC() + } + th.AssertDeepEquals(t, ExpectedClusters, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/urls.go new file mode 100644 index 000000000000..d2a36bdb2529 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clusters/urls.go @@ -0,0 +1,23 @@ +package clusters + +import ( + "github.com/gophercloud/gophercloud" +) + +var apiName = "clusters" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiName) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("clusters", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("clusters") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/doc.go new file mode 100644 index 000000000000..12289c2abba1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/doc.go @@ -0,0 +1,90 @@ +// Package clustertemplates contains functionality for working with Magnum Cluster Templates +// resources. +/* +Package clustertemplates provides information and interaction with the cluster-templates through +the OpenStack Container Infra service. 
+ +Example to Create Cluster Template + + boolFalse := false + boolTrue := true + createOpts := clustertemplates.CreateOpts{ + Name: "test-cluster-template", + Labels: map[string]string{}, + FixedSubnet: "", + MasterFlavorID: "", + NoProxy: "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", + HTTPSProxy: "http://10.164.177.169:8080", + TLSDisabled: &boolFalse, + KeyPairID: "kp", + Public: &boolFalse, + HTTPProxy: "http://10.164.177.169:8080", + ServerType: "vm", + ExternalNetworkID: "public", + ImageID: "fedora-atomic-latest", + VolumeDriver: "cinder", + RegistryEnabled: &boolFalse, + DockerStorageDriver: "devicemapper", + NetworkDriver: "flannel", + FixedNetwork: "", + COE: "kubernetes", + FlavorID: "m1.small", + MasterLBEnabled: &boolTrue, + DNSNameServer: "8.8.8.8", + } + + clustertemplate, err := clustertemplates.Create(serviceClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete Cluster Template + + clusterTemplateID := "dc6d336e3fc4c0a951b5698cd1236ee" + err := clustertemplates.Delete(serviceClient, clusterTemplateID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List Clusters Templates + + listOpts := clustertemplates.ListOpts{ + Limit: 20, + } + + allPages, err := clustertemplates.List(serviceClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allClusterTemplates, err := clusters.ExtractClusterTemplates(allPages) + if err != nil { + panic(err) + } + + for _, clusterTemplate := range allClusterTemplates { + fmt.Printf("%+v\n", clusterTemplate) + } + +Example to Update Cluster Template + + updateOpts := []clustertemplates.UpdateOptsBuilder{ + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/master_lb_enabled", + Value: "True", + }, + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/registry_enabled", + Value: "True", + }, + } + + clustertemplate, err := clustertemplates.Update(serviceClient, updateOpts).Extract() + if err != nil { + panic(err) + } + +*/ +package clustertemplates diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/requests.go new file mode 100644 index 000000000000..c3ceba07441b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/requests.go @@ -0,0 +1,178 @@ +package clustertemplates + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder Builder. 
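+// It allows extensions to add additional parameters to the Create request.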
+type CreateOptsBuilder interface { + ToClusterCreateMap() (map[string]interface{}, error) +} + +// CreateOpts params +type CreateOpts struct { + APIServerPort *int `json:"apiserver_port,omitempty"` + COE string `json:"coe" required:"true"` + DNSNameServer string `json:"dns_nameserver,omitempty"` + DockerStorageDriver string `json:"docker_storage_driver,omitempty"` + DockerVolumeSize *int `json:"docker_volume_size,omitempty"` + ExternalNetworkID string `json:"external_network_id,omitempty"` + FixedNetwork string `json:"fixed_network,omitempty"` + FixedSubnet string `json:"fixed_subnet,omitempty"` + FlavorID string `json:"flavor_id,omitempty"` + FloatingIPEnabled *bool `json:"floating_ip_enabled,omitempty"` + HTTPProxy string `json:"http_proxy,omitempty"` + HTTPSProxy string `json:"https_proxy,omitempty"` + ImageID string `json:"image_id" required:"true"` + InsecureRegistry string `json:"insecure_registry,omitempty"` + KeyPairID string `json:"keypair_id,omitempty"` + Labels map[string]string `json:"labels,omitempty"` + MasterFlavorID string `json:"master_flavor_id,omitempty"` + MasterLBEnabled *bool `json:"master_lb_enabled,omitempty"` + Name string `json:"name,omitempty"` + NetworkDriver string `json:"network_driver,omitempty"` + NoProxy string `json:"no_proxy,omitempty"` + Public *bool `json:"public,omitempty"` + RegistryEnabled *bool `json:"registry_enabled,omitempty"` + ServerType string `json:"server_type,omitempty"` + TLSDisabled *bool `json:"tls_disabled,omitempty"` + VolumeDriver string `json:"volume_driver,omitempty"` +} + +// ToClusterCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToClusterCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create requests the creation of a new cluster. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToClusterCreateMap() + if err != nil { + r.Err = err + return + } + var result *http.Response + result, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + + if r.Err == nil { + r.Header = result.Header + } + + return +} + +// Delete deletes the specified cluster ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + var result *http.Response + result, r.Err = client.Delete(deleteURL(client, id), nil) + r.Header = result.Header + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToClusterTemplateListQuery() (string, error) +} + +// ListOpts allows the sorting of paginated collections through +// the API. SortKey allows you to sort by a particular cluster templates attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for pagination. +type ListOpts struct { + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToClusterTemplateListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToClusterTemplateListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// cluster-templates. It accepts a ListOptsBuilder, which allows you to sort +// the returned collection for greater efficiency. 
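+//
+// A brief pagination sketch (assumes a configured service client "client"):
+//
+//	pages, err := List(client, ListOpts{Limit: 20}).AllPages()
+//	if err == nil {
+//		templates, _ := ExtractClusterTemplates(pages)
+//		_ = templates
+//	}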
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToClusterTemplateListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ClusterTemplatePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific cluster-template based on its unique ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + var result *http.Response + result, r.Err = client.Get(getURL(client, id), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + if r.Err == nil { + r.Header = result.Header + } + return +} + +type UpdateOp string + +const ( + AddOp UpdateOp = "add" + RemoveOp UpdateOp = "remove" + ReplaceOp UpdateOp = "replace" +) + +type UpdateOpts struct { + Op UpdateOp `json:"op" required:"true"` + Path string `json:"path" required:"true"` + Value string `json:"value,omitempty"` +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToClusterTemplateUpdateMap() (map[string]interface{}, error) +} + +// ToClusterUpdateMap assembles a request body based on the contents of +// UpdateOpts. +func (opts UpdateOpts) ToClusterTemplateUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + return b, nil +} + +// Update implements cluster updated request. +func Update(client *gophercloud.ServiceClient, id string, opts []UpdateOptsBuilder) (r UpdateResult) { + var o []map[string]interface{} + for _, opt := range opts { + b, err := opt.ToClusterTemplateUpdateMap() + if err != nil { + r.Err = err + return r + } + o = append(o, b) + } + var result *http.Response + result, r.Err = client.Patch(updateURL(client, id), o, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + if r.Err == nil { + r.Header = result.Header + } + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/results.go new file mode 100644 index 000000000000..8b416f7e70c9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/results.go @@ -0,0 +1,114 @@ +package clustertemplates + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + commonResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult is the response of a Get operations. +type GetResult struct { + commonResult +} + +// UpdateResult is the response of a Update operations. +type UpdateResult struct { + commonResult +} + +// Extract is a function that accepts a result and extracts a cluster-template resource. 
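+//
+// For example (sketch):
+//
+//	template, err := Get(client, templateID).Extract()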
+func (r commonResult) Extract() (*ClusterTemplate, error) { + var s *ClusterTemplate + err := r.ExtractInto(&s) + return s, err +} + +// Represents a template for a Cluster Template +type ClusterTemplate struct { + APIServerPort int `json:"apiserver_port"` + COE string `json:"coe"` + ClusterDistro string `json:"cluster_distro"` + CreatedAt time.Time `json:"created_at"` + DNSNameServer string `json:"dns_nameserver"` + DockerStorageDriver string `json:"docker_storage_driver"` + DockerVolumeSize int `json:"docker_volume_size"` + ExternalNetworkID string `json:"external_network_id"` + FixedNetwork string `json:"fixed_network"` + FixedSubnet string `json:"fixed_subnet"` + FlavorID string `json:"flavor_id"` + FloatingIPEnabled bool `json:"floating_ip_enabled"` + HTTPProxy string `json:"http_proxy"` + HTTPSProxy string `json:"https_proxy"` + ImageID string `json:"image_id"` + InsecureRegistry string `json:"insecure_registry"` + KeyPairID string `json:"keypair_id"` + Labels map[string]string `json:"labels"` + Links []gophercloud.Link `json:"links"` + MasterFlavorID string `json:"master_flavor_id"` + MasterLBEnabled bool `json:"master_lb_enabled"` + Name string `json:"name"` + NetworkDriver string `json:"network_driver"` + NoProxy string `json:"no_proxy"` + ProjectID string `json:"project_id"` + Public bool `json:"public"` + RegistryEnabled bool `json:"registry_enabled"` + ServerType string `json:"server_type"` + TLSDisabled bool `json:"tls_disabled"` + UUID string `json:"uuid"` + UpdatedAt time.Time `json:"updated_at"` + UserID string `json:"user_id"` + VolumeDriver string `json:"volume_driver"` +} + +// ClusterTemplatePage is the page returned by a pager when traversing over a +// collection of cluster-templates. +type ClusterTemplatePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of cluster template has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r ClusterTemplatePage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Next, nil +} + +// IsEmpty checks whether a ClusterTemplatePage struct is empty. +func (r ClusterTemplatePage) IsEmpty() (bool, error) { + is, err := ExtractClusterTemplates(r) + return len(is) == 0, err +} + +// ExtractClusterTemplates accepts a Page struct, specifically a ClusterTemplatePage struct, +// and extracts the elements into a slice of cluster templates structs. In other words, +// a generic collection is mapped into a relevant slice. 
+func ExtractClusterTemplates(r pagination.Page) ([]ClusterTemplate, error) { + var s struct { + ClusterTemplates []ClusterTemplate `json:"clustertemplates"` + } + err := (r.(ClusterTemplatePage)).ExtractInto(&s) + return s.ClusterTemplates, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/fixtures.go new file mode 100644 index 000000000000..7eca134c536e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/fixtures.go @@ -0,0 +1,532 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ClusterTemplateResponse = ` +{ + "apiserver_port": 8081, + "cluster_distro": "fedora-atomic", + "coe": "kubernetes", + "created_at": "2018-06-27T16:52:21+00:00", + "dns_nameserver": "8.8.8.8", + "docker_storage_driver": "devicemapper", + "docker_volume_size": 3, + "external_network_id": "public", + "fixed_network": null, + "fixed_subnet": null, + "flavor_id": "m1.small", + "floating_ip_enabled": true, + "http_proxy": "http://10.164.177.169:8080", + "https_proxy": "http://10.164.177.169:8080", + "image_id": "Fedora-Atomic-27-20180212.2.x86_64", + "insecure_registry": null, + "keypair_id": "kp", + "labels": null, + "links": [ + { + "href": "http://10.63.176.154:9511/v1/clustertemplates/79c0f9e5-93b8-4719-8fab-063afc67bffe", + "rel": "self" + }, + { + "href": "http://10.63.176.154:9511/clustertemplates/79c0f9e5-93b8-4719-8fab-063afc67bffe", + "rel": "bookmark" + } + ], + "master_flavor_id": null, + "master_lb_enabled": true, + "name": "kubernetes-dev", + "network_driver": "flannel", + "no_proxy": "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", + "project_id": "76bd201dbc1641729904ab190d3390c6", + "public": false, + "registry_enabled": false, + "server_type": "vm", + "tls_disabled": false, + "updated_at": null, + "user_id": "c48d66144e9c4a54ae2b164b85cfefe3", + "uuid": "79c0f9e5-93b8-4719-8fab-063afc67bffe", + "volume_driver": "cinder" +}` + +const ClusterTemplateResponse_EmptyTime = ` +{ + "apiserver_port": null, + "cluster_distro": "fedora-atomic", + "coe": "kubernetes", + "created_at": null, + "dns_nameserver": "8.8.8.8", + "docker_storage_driver": null, + "docker_volume_size": 5, + "external_network_id": "public", + "fixed_network": null, + "fixed_subnet": null, + "flavor_id": "m1.small", + "http_proxy": null, + "https_proxy": null, + "image_id": "fedora-atomic-latest", + "insecure_registry": null, + "keypair_id": "testkey", + "labels": {}, + "links": [ + { + "href": "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "bookmark" + }, + { + "href": "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "self" + } 
+ ], + "master_flavor_id": null, + "master_lb_enabled": false, + "name": "kubernetes-dev", + "network_driver": "flannel", + "no_proxy": null, + "public": false, + "registry_enabled": false, + "server_type": "vm", + "tls_disabled": false, + "updated_at": null, + "uuid": "472807c2-f175-4946-9765-149701a5aba7", + "volume_driver": null +}` + +const ClusterTemplateListResponse = ` +{ + "clustertemplates": [ + { + "apiserver_port": 8081, + "cluster_distro": "fedora-atomic", + "coe": "kubernetes", + "created_at": "2018-06-27T16:52:21+00:00", + "dns_nameserver": "8.8.8.8", + "docker_storage_driver": "devicemapper", + "docker_volume_size": 3, + "external_network_id": "public", + "fixed_network": null, + "fixed_subnet": null, + "flavor_id": "m1.small", + "floating_ip_enabled": true, + "http_proxy": "http://10.164.177.169:8080", + "https_proxy": "http://10.164.177.169:8080", + "image_id": "Fedora-Atomic-27-20180212.2.x86_64", + "insecure_registry": null, + "keypair_id": "kp", + "labels": null, + "links": [ + { + "href": "http://10.63.176.154:9511/v1/clustertemplates/79c0f9e5-93b8-4719-8fab-063afc67bffe", + "rel": "self" + }, + { + "href": "http://10.63.176.154:9511/clustertemplates/79c0f9e5-93b8-4719-8fab-063afc67bffe", + "rel": "bookmark" + } + ], + "master_flavor_id": null, + "master_lb_enabled": true, + "name": "kubernetes-dev", + "network_driver": "flannel", + "no_proxy": "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", + "project_id": "76bd201dbc1641729904ab190d3390c6", + "public": false, + "registry_enabled": false, + "server_type": "vm", + "tls_disabled": false, + "updated_at": null, + "user_id": "c48d66144e9c4a54ae2b164b85cfefe3", + "uuid": "79c0f9e5-93b8-4719-8fab-063afc67bffe", + "volume_driver": "cinder" + }, + { + "apiserver_port": null, + "cluster_distro": "fedora-atomic", + "coe": "kubernetes", + "created_at": null, + "dns_nameserver": "8.8.8.8", + "docker_storage_driver": null, + "docker_volume_size": 5, + "external_network_id": "public", + "fixed_network": null, + "fixed_subnet": null, + "flavor_id": "m1.small", + "http_proxy": null, + "https_proxy": null, + "image_id": "fedora-atomic-latest", + "insecure_registry": null, + "keypair_id": "testkey", + "labels": {}, + "links": [ + { + "href": "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "bookmark" + }, + { + "href": "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "self" + } + ], + "master_flavor_id": null, + "master_lb_enabled": false, + "name": "kubernetes-dev", + "network_driver": "flannel", + "no_proxy": null, + "public": false, + "registry_enabled": false, + "server_type": "vm", + "tls_disabled": false, + "updated_at": null, + "uuid": "472807c2-f175-4946-9765-149701a5aba7", + "volume_driver": null + } + ] +}` + +var ExpectedClusterTemplate = clustertemplates.ClusterTemplate{ + APIServerPort: 8081, + COE: "kubernetes", + ClusterDistro: "fedora-atomic", + CreatedAt: time.Date(2018, 6, 27, 16, 52, 21, 0, time.UTC), + DNSNameServer: "8.8.8.8", + DockerStorageDriver: "devicemapper", + DockerVolumeSize: 3, + ExternalNetworkID: "public", + FixedNetwork: "", + FixedSubnet: "", + FlavorID: "m1.small", + FloatingIPEnabled: true, + HTTPProxy: "http://10.164.177.169:8080", + HTTPSProxy: "http://10.164.177.169:8080", + ImageID: "Fedora-Atomic-27-20180212.2.x86_64", + InsecureRegistry: "", + KeyPairID: "kp", + Labels: map[string]string(nil), + Links: []gophercloud.Link{ + {Href: 
"http://10.63.176.154:9511/v1/clustertemplates/79c0f9e5-93b8-4719-8fab-063afc67bffe", Rel: "self"}, + {Href: "http://10.63.176.154:9511/clustertemplates/79c0f9e5-93b8-4719-8fab-063afc67bffe", Rel: "bookmark"}, + }, + MasterFlavorID: "", + MasterLBEnabled: true, + Name: "kubernetes-dev", + NetworkDriver: "flannel", + NoProxy: "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", + ProjectID: "76bd201dbc1641729904ab190d3390c6", + Public: false, + RegistryEnabled: false, + ServerType: "vm", + TLSDisabled: false, + UUID: "79c0f9e5-93b8-4719-8fab-063afc67bffe", + UpdatedAt: time.Time{}, + UserID: "c48d66144e9c4a54ae2b164b85cfefe3", + VolumeDriver: "cinder", +} + +var ExpectedClusterTemplate_EmptyTime = clustertemplates.ClusterTemplate{ + COE: "kubernetes", + ClusterDistro: "fedora-atomic", + CreatedAt: time.Time{}, + DNSNameServer: "8.8.8.8", + DockerStorageDriver: "", + DockerVolumeSize: 5, + ExternalNetworkID: "public", + FixedNetwork: "", + FixedSubnet: "", + FlavorID: "m1.small", + HTTPProxy: "", + HTTPSProxy: "", + ImageID: "fedora-atomic-latest", + InsecureRegistry: "", + KeyPairID: "testkey", + Labels: map[string]string{}, + Links: []gophercloud.Link{ + {Href: "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", Rel: "bookmark"}, + {Href: "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", Rel: "self"}, + }, + MasterFlavorID: "", + MasterLBEnabled: false, + Name: "kubernetes-dev", + NetworkDriver: "flannel", + NoProxy: "", + Public: false, + RegistryEnabled: false, + ServerType: "vm", + TLSDisabled: false, + UUID: "472807c2-f175-4946-9765-149701a5aba7", + UpdatedAt: time.Time{}, + VolumeDriver: "", +} + +var ExpectedClusterTemplates = []clustertemplates.ClusterTemplate{ExpectedClusterTemplate, ExpectedClusterTemplate_EmptyTime} + +func HandleCreateClusterTemplateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("OpenStack-API-Minimum-Version", "container-infra 1.1") + w.Header().Add("OpenStack-API-Maximum-Version", "container-infra 1.6") + w.Header().Add("OpenStack-API-Version", "container-infra 1.1") + w.Header().Add("X-OpenStack-Request-Id", "req-781e9bdc-4163-46eb-91c9-786c53188bbb") + w.WriteHeader(http.StatusCreated) + + fmt.Fprint(w, ClusterTemplateResponse) + }) +} + +func HandleDeleteClusterSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates/6dc6d336e3fc4c0a951b5698cd1236ee", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.Header().Add("OpenStack-API-Minimum-Version", "container-infra 1.1") + w.Header().Add("OpenStack-API-Maximum-Version", "container-infra 1.6") + w.Header().Add("OpenStack-API-Version", "container-infra 1.1") + w.Header().Add("X-OpenStack-Request-Id", "req-781e9bdc-4163-46eb-91c9-786c53188bbb") + w.WriteHeader(http.StatusNoContent) + }) +} + +func HandleListClusterTemplateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterTemplateListResponse) + }) +} + +func 
HandleGetClusterTemplateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterTemplateResponse) + }) +} + +func HandleGetClusterTemplateEmptyTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ClusterTemplateResponse_EmptyTime) + }) +} + +const UpdateResponse = ` +{ + "apiserver_port": null, + "cluster_distro": "fedora-atomic", + "coe": "kubernetes", + "created_at": "2016-08-10T13:47:01+00:00", + "dns_nameserver": "8.8.8.8", + "docker_storage_driver": null, + "docker_volume_size": 5, + "external_network_id": "public", + "fixed_network": null, + "fixed_subnet": null, + "flavor_id": "m1.small", + "http_proxy": null, + "https_proxy": null, + "image_id": "fedora-atomic-latest", + "insecure_registry": null, + "keypair_id": "testkey", + "labels": {}, + "links": [ + { + "href": "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "self" + }, + { + "href": "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "bookmark" + } + ], + "master_flavor_id": null, + "master_lb_enabled": false, + "name": "kubernetes-dev", + "network_driver": "flannel", + "no_proxy": null, + "public": false, + "registry_enabled": false, + "server_type": "vm", + "tls_disabled": false, + "updated_at": null, + "uuid": "472807c2-f175-4946-9765-149701a5aba7", + "volume_driver": null +}` + +const UpdateResponse_EmptyTime = ` +{ + "apiserver_port": null, + "cluster_distro": "fedora-atomic", + "coe": "kubernetes", + "created_at": null, + "dns_nameserver": "8.8.8.8", + "docker_storage_driver": null, + "docker_volume_size": 5, + "external_network_id": "public", + "fixed_network": null, + "fixed_subnet": null, + "flavor_id": "m1.small", + "http_proxy": null, + "https_proxy": null, + "image_id": "fedora-atomic-latest", + "insecure_registry": null, + "keypair_id": "testkey", + "labels": {}, + "links": [ + { + "href": "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "self" + }, + { + "href": "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", + "rel": "bookmark" + } + ], + "master_flavor_id": null, + "master_lb_enabled": false, + "name": "kubernetes-dev", + "network_driver": "flannel", + "no_proxy": null, + "public": false, + "registry_enabled": false, + "server_type": "vm", + "tls_disabled": false, + "updated_at": null, + "uuid": "472807c2-f175-4946-9765-149701a5aba7", + "volume_driver": null +}` + +const UpdateResponse_InvalidUpdate = ` +{ + "errors": [{\"status\": 400, \"code\": \"client\", \"links\": [], \"title\": \"'add' and 'replace' operations needs value\", \"detail\": \"'add' and 'replace' operations needs value\", \"request_id\": \"\"}] +}` + +var ExpectedUpdateClusterTemplate = clustertemplates.ClusterTemplate{ + COE: "kubernetes", + ClusterDistro: "fedora-atomic", + CreatedAt: time.Date(2016, 8, 10, 13, 47, 01, 0, time.UTC), + DNSNameServer: "8.8.8.8", + DockerStorageDriver: "", + 
DockerVolumeSize: 5, + ExternalNetworkID: "public", + FixedNetwork: "", + FixedSubnet: "", + FlavorID: "m1.small", + HTTPProxy: "", + HTTPSProxy: "", + ImageID: "fedora-atomic-latest", + InsecureRegistry: "", + KeyPairID: "testkey", + Labels: map[string]string{}, + Links: []gophercloud.Link{ + {Href: "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", Rel: "self"}, + {Href: "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", Rel: "bookmark"}, + }, + MasterFlavorID: "", + MasterLBEnabled: false, + Name: "kubernetes-dev", + NetworkDriver: "flannel", + NoProxy: "", + Public: false, + RegistryEnabled: false, + ServerType: "vm", + TLSDisabled: false, + UUID: "472807c2-f175-4946-9765-149701a5aba7", + UpdatedAt: time.Time{}, + VolumeDriver: "", +} + +var ExpectedUpdateClusterTemplate_EmptyTime = clustertemplates.ClusterTemplate{ + COE: "kubernetes", + ClusterDistro: "fedora-atomic", + CreatedAt: time.Time{}, + DNSNameServer: "8.8.8.8", + DockerStorageDriver: "", + DockerVolumeSize: 5, + ExternalNetworkID: "public", + FixedNetwork: "", + FixedSubnet: "", + FlavorID: "m1.small", + HTTPProxy: "", + HTTPSProxy: "", + ImageID: "fedora-atomic-latest", + InsecureRegistry: "", + KeyPairID: "testkey", + Labels: map[string]string{}, + Links: []gophercloud.Link{ + {Href: "http://65.61.151.130:9511/v1/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", Rel: "self"}, + {Href: "http://65.61.151.130:9511/clustertemplates/472807c2-f175-4946-9765-149701a5aba7", Rel: "bookmark"}, + }, + MasterFlavorID: "", + MasterLBEnabled: false, + Name: "kubernetes-dev", + NetworkDriver: "flannel", + NoProxy: "", + Public: false, + RegistryEnabled: false, + ServerType: "vm", + TLSDisabled: false, + UUID: "472807c2-f175-4946-9765-149701a5aba7", + UpdatedAt: time.Time{}, + VolumeDriver: "", +} + +func HandleUpdateClusterTemplateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, UpdateResponse) + }) +} + +func HandleUpdateClusterTemplateEmptyTimeSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, UpdateResponse_EmptyTime) + }) +} + +func HandleUpdateClusterTemplateInvalidUpdate(t *testing.T) { + th.Mux.HandleFunc("/v1/clustertemplates/7d85f602-a948-4a30-afd4-e84f47471c15", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusBadRequest) + + fmt.Fprint(w, UpdateResponse_EmptyTime) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/requests_test.go new file mode 100644 index 000000000000..cbed6b136548 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/testing/requests_test.go @@ -0,0 +1,212 @@ +package 
testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateClusterTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreateClusterTemplateSuccessfully(t) + + boolFalse := false + boolTrue := true + dockerVolumeSize := 3 + opts := clustertemplates.CreateOpts{ + Name: "kubernetes-dev", + Labels: map[string]string{}, + FixedSubnet: "", + MasterFlavorID: "", + NoProxy: "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", + HTTPSProxy: "http://10.164.177.169:8080", + TLSDisabled: &boolFalse, + KeyPairID: "kp", + Public: &boolFalse, + HTTPProxy: "http://10.164.177.169:8080", + DockerVolumeSize: &dockerVolumeSize, + ServerType: "vm", + ExternalNetworkID: "public", + ImageID: "Fedora-Atomic-27-20180212.2.x86_64", + VolumeDriver: "cinder", + RegistryEnabled: &boolFalse, + DockerStorageDriver: "devicemapper", + NetworkDriver: "flannel", + FixedNetwork: "", + COE: "kubernetes", + FlavorID: "m1.small", + MasterLBEnabled: &boolTrue, + DNSNameServer: "8.8.8.8", + } + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + res := clustertemplates.Create(sc, opts) + th.AssertNoErr(t, res.Err) + + requestID := res.Header.Get("X-OpenStack-Request-Id") + th.AssertEquals(t, "req-781e9bdc-4163-46eb-91c9-786c53188bbb", requestID) + + actual, err := res.Extract() + th.AssertNoErr(t, err) + + actual.CreatedAt = actual.CreatedAt.UTC() + th.AssertDeepEquals(t, ExpectedClusterTemplate, *actual) +} + +func TestDeleteClusterTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleDeleteClusterSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + res := clustertemplates.Delete(sc, "6dc6d336e3fc4c0a951b5698cd1236ee") + th.AssertNoErr(t, res.Err) + requestID := res.Header["X-Openstack-Request-Id"][0] + th.AssertEquals(t, "req-781e9bdc-4163-46eb-91c9-786c53188bbb", requestID) +} + +func TestListClusterTemplates(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListClusterTemplateSuccessfully(t) + + count := 0 + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + clustertemplates.List(sc, clustertemplates.ListOpts{Limit: 2}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := clustertemplates.ExtractClusterTemplates(page) + th.AssertNoErr(t, err) + for idx, _ := range actual { + actual[idx].CreatedAt = actual[idx].CreatedAt.UTC() + } + th.AssertDeepEquals(t, ExpectedClusterTemplates, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGetClusterTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetClusterTemplateSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + actual, err := clustertemplates.Get(sc, "7d85f602-a948-4a30-afd4-e84f47471c15").Extract() + th.AssertNoErr(t, err) + actual.CreatedAt = actual.CreatedAt.UTC() + th.AssertDeepEquals(t, ExpectedClusterTemplate, *actual) +} + +func TestGetClusterTemplateEmptyTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetClusterTemplateEmptyTimeSuccessfully(t) + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + actual, err := clustertemplates.Get(sc, "7d85f602-a948-4a30-afd4-e84f47471c15").Extract() + th.AssertNoErr(t, err) + 
actual.CreatedAt = actual.CreatedAt.UTC() + th.AssertDeepEquals(t, ExpectedClusterTemplate_EmptyTime, *actual) +} + +func TestUpdateClusterTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateClusterTemplateSuccessfully(t) + + updateOpts := []clustertemplates.UpdateOptsBuilder{ + clustertemplates.UpdateOpts{ + Path: "/master_lb_enabled", + Value: "True", + Op: clustertemplates.ReplaceOp, + }, + clustertemplates.UpdateOpts{ + Path: "/registry_enabled", + Value: "True", + Op: clustertemplates.ReplaceOp, + }, + } + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + res := clustertemplates.Update(sc, "7d85f602-a948-4a30-afd4-e84f47471c15", updateOpts) + th.AssertNoErr(t, res.Err) + + actual, err := res.Extract() + th.AssertNoErr(t, err) + actual.CreatedAt = actual.CreatedAt.UTC() + th.AssertDeepEquals(t, ExpectedUpdateClusterTemplate, *actual) +} + +func TestUpdateClusterTemplateEmptyTime(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateClusterTemplateEmptyTimeSuccessfully(t) + + updateOpts := []clustertemplates.UpdateOptsBuilder{ + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/master_lb_enabled", + Value: "True", + }, + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/registry_enabled", + Value: "True", + }, + } + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + actual, err := clustertemplates.Update(sc, "7d85f602-a948-4a30-afd4-e84f47471c15", updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedUpdateClusterTemplate_EmptyTime, *actual) +} + +func TestUpdateClusterTemplateInvalidUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdateClusterTemplateInvalidUpdate(t) + + updateOpts := []clustertemplates.UpdateOptsBuilder{ + clustertemplates.UpdateOpts{ + Op: clustertemplates.ReplaceOp, + Path: "/master_lb_enabled", + }, + clustertemplates.UpdateOpts{ + Op: clustertemplates.RemoveOp, + Path: "/master_lb_enabled", + }, + clustertemplates.UpdateOpts{ + Op: clustertemplates.AddOp, + Path: "/master_lb_enabled", + }, + } + + sc := fake.ServiceClient() + sc.Endpoint = sc.Endpoint + "v1/" + _, err := clustertemplates.Update(sc, "7d85f602-a948-4a30-afd4-e84f47471c15", updateOpts).Extract() + th.AssertEquals(t, true, err != nil) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/urls.go new file mode 100644 index 000000000000..f90fb85108af --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/containerinfra/v1/clustertemplates/urls.go @@ -0,0 +1,35 @@ +package clustertemplates + +import ( + "github.com/gophercloud/gophercloud" +) + +var apiName = "clustertemplates" + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiName) +} + +func idURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL(apiName, id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return idURL(client, id) 
+} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/doc.go new file mode 100644 index 000000000000..45b9cfb4e88e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/doc.go @@ -0,0 +1,11 @@ +// Package configurations provides information and interaction with the +// configuration API resource in the Rackspace Database service. +// +// A configuration group is a collection of key/value pairs which define how a +// particular database operates. These key/value pairs are specific to each +// datastore type and serve like settings. Some directives are capable of being +// applied dynamically, while other directives require a server restart to take +// effect. The configuration group can be applied to an instance at creation or +// applied to an existing instance to modify the behavior of the running +// datastore on the instance. +package configurations diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/requests.go new file mode 100644 index 000000000000..6851c58765c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/requests.go @@ -0,0 +1,167 @@ +package configurations + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will list all of the available configurations. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page { + return ConfigPage{pagination.SinglePageBase(r)} + }) +} + +// CreateOptsBuilder is a top-level interface which renders a JSON map. +type CreateOptsBuilder interface { + ToConfigCreateMap() (map[string]interface{}, error) +} + +// DatastoreOpts is the primary options struct for creating and modifying +// how configuration resources are associated with datastores. +type DatastoreOpts struct { + // The type of datastore. Defaults to "MySQL". + Type string `json:"type,omitempty"` + // The specific version of a datastore. Defaults to "5.6". + Version string `json:"version,omitempty"` +} + +// CreateOpts is the struct responsible for configuring new configurations. +type CreateOpts struct { + // The configuration group name + Name string `json:"name" required:"true"` + // A map of user-defined configuration settings that will define + // how each associated datastore works. Each key/value pair is specific to a + // datastore type. + Values map[string]interface{} `json:"values" required:"true"` + // Associates the configuration group with a particular datastore. + Datastore *DatastoreOpts `json:"datastore,omitempty"` + // A human-readable explanation for the group. + Description string `json:"description,omitempty"` +} + +// ToConfigCreateMap casts a CreateOpts struct into a JSON map. +func (opts CreateOpts) ToConfigCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "configuration") +} + +// Create will create a new configuration group. 
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToConfigCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} + +// Get will retrieve the details for a specified configuration group. +func Get(client *gophercloud.ServiceClient, configID string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, configID), &r.Body, nil) + return +} + +// UpdateOptsBuilder is the top-level interface for casting update options into +// JSON maps. +type UpdateOptsBuilder interface { + ToConfigUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the struct responsible for modifying existing configurations. +type UpdateOpts struct { + // The configuration group name + Name string `json:"name,omitempty"` + // A map of user-defined configuration settings that will define + // how each associated datastore works. Each key/value pair is specific to a + // datastore type. + Values map[string]interface{} `json:"values,omitempty"` + // Associates the configuration group with a particular datastore. + Datastore *DatastoreOpts `json:"datastore,omitempty"` + // A human-readable explanation for the group. + Description string `json:"description,omitempty"` +} + +// ToConfigUpdateMap will cast an UpdateOpts struct into a JSON map. +func (opts UpdateOpts) ToConfigUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "configuration") +} + +// Update will modify an existing configuration group by performing a merge +// between new and existing values. If the key already exists, the new value +// will overwrite. All other keys will remain unaffected. +func Update(client *gophercloud.ServiceClient, configID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToConfigUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(resourceURL(client, configID), &b, nil, nil) + return +} + +// Replace will modify an existing configuration group by overwriting the +// entire parameter group with the new values provided. Any existing keys not +// included in UpdateOptsBuilder will be deleted. +func Replace(client *gophercloud.ServiceClient, configID string, opts UpdateOptsBuilder) (r ReplaceResult) { + b, err := opts.ToConfigUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(resourceURL(client, configID), &b, nil, nil) + return +} + +// Delete will permanently delete a configuration group. Please note that +// config groups cannot be deleted whilst still attached to running instances - +// you must detach and then delete them. +func Delete(client *gophercloud.ServiceClient, configID string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, configID), nil) + return +} + +// ListInstances will list all the instances associated with a particular +// configuration group. +func ListInstances(client *gophercloud.ServiceClient, configID string) pagination.Pager { + return pagination.NewPager(client, instancesURL(client, configID), func(r pagination.PageResult) pagination.Page { + return instances.InstancePage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}} + }) +} + +// ListDatastoreParams will list all the available and supported parameters +// that can be used for a particular datastore ID and a particular version. 
+// For example, if you are wondering how you can configure a MySQL 5.6 instance, +// you can use this operation (you will need to retrieve the MySQL datastore ID +// by using the datastores API). +func ListDatastoreParams(client *gophercloud.ServiceClient, datastoreID, versionID string) pagination.Pager { + return pagination.NewPager(client, listDSParamsURL(client, datastoreID, versionID), func(r pagination.PageResult) pagination.Page { + return ParamPage{pagination.SinglePageBase(r)} + }) +} + +// GetDatastoreParam will retrieve information about a specific configuration +// parameter. For example, you can use this operation to understand more about +// "innodb_file_per_table" configuration param for MySQL datastores. You will +// need the param's ID first, which can be attained by using the ListDatastoreParams +// operation. +func GetDatastoreParam(client *gophercloud.ServiceClient, datastoreID, versionID, paramID string) (r ParamResult) { + _, r.Err = client.Get(getDSParamURL(client, datastoreID, versionID, paramID), &r.Body, nil) + return +} + +// ListGlobalParams is similar to ListDatastoreParams but does not require a +// DatastoreID. +func ListGlobalParams(client *gophercloud.ServiceClient, versionID string) pagination.Pager { + return pagination.NewPager(client, listGlobalParamsURL(client, versionID), func(r pagination.PageResult) pagination.Page { + return ParamPage{pagination.SinglePageBase(r)} + }) +} + +// GetGlobalParam is similar to GetDatastoreParam but does not require a +// DatastoreID. +func GetGlobalParam(client *gophercloud.ServiceClient, versionID, paramID string) (r ParamResult) { + _, r.Err = client.Get(getGlobalParamURL(client, versionID, paramID), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/results.go new file mode 100644 index 000000000000..13bcbffe8b96 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/results.go @@ -0,0 +1,141 @@ +package configurations + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Config represents a configuration group API resource. +type Config struct { + Created time.Time `json:"-"` + Updated time.Time `json:"-"` + DatastoreName string `json:"datastore_name"` + DatastoreVersionID string `json:"datastore_version_id"` + DatastoreVersionName string `json:"datastore_version_name"` + Description string + ID string + Name string + Values map[string]interface{} +} + +func (r *Config) UnmarshalJSON(b []byte) error { + type tmp Config + var s struct { + tmp + Created gophercloud.JSONRFC3339NoZ `json:"created"` + Updated gophercloud.JSONRFC3339NoZ `json:"updated"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Config(s.tmp) + + r.Created = time.Time(s.Created) + r.Updated = time.Time(s.Updated) + + return nil +} + +// ConfigPage contains a page of Config resources in a paginated collection. +type ConfigPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a ConfigPage is empty. +func (r ConfigPage) IsEmpty() (bool, error) { + is, err := ExtractConfigs(r) + return len(is) == 0, err +} + +// ExtractConfigs will retrieve a slice of Config structs from a page. 
+func ExtractConfigs(r pagination.Page) ([]Config, error) { + var s struct { + Configs []Config `json:"configurations"` + } + err := (r.(ConfigPage)).ExtractInto(&s) + return s.Configs, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will retrieve a Config resource from an operation result. +func (r commonResult) Extract() (*Config, error) { + var s struct { + Config *Config `json:"configuration"` + } + err := r.ExtractInto(&s) + return s.Config, err +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + commonResult +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// ReplaceResult represents the result of a Replace operation. +type ReplaceResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Param represents a configuration parameter API resource. +type Param struct { + Max float64 + Min float64 + Name string + RestartRequired bool `json:"restart_required"` + Type string +} + +// ParamPage contains a page of Param resources in a paginated collection. +type ParamPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a ParamPage is empty. +func (r ParamPage) IsEmpty() (bool, error) { + is, err := ExtractParams(r) + return len(is) == 0, err +} + +// ExtractParams will retrieve a slice of Param structs from a page. +func ExtractParams(r pagination.Page) ([]Param, error) { + var s struct { + Params []Param `json:"configuration-parameters"` + } + err := (r.(ParamPage)).ExtractInto(&s) + return s.Params, err +} + +// ParamResult represents the result of an operation which retrieves details +// about a particular configuration param. +type ParamResult struct { + gophercloud.Result +} + +// Extract will retrieve a param from an operation result. 
+func (r ParamResult) Extract() (*Param, error) { + var s *Param + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/doc.go new file mode 100644 index 000000000000..60c997a2b3bc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/doc.go @@ -0,0 +1,2 @@ +// db_configurations_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/fixtures.go new file mode 100644 index 000000000000..3d515ea2b157 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/fixtures.go @@ -0,0 +1,160 @@ +package testing + +import ( + "fmt" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/configurations" +) + +var ( + timestamp = "2015-11-12T14:22:42" + timeVal, _ = time.Parse(gophercloud.RFC3339NoZ, timestamp) +) + +var singleConfigJSON = ` +{ + "created": "` + timestamp + `", + "datastore_name": "mysql", + "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "datastore_version_name": "5.6", + "description": "example_description", + "id": "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + "name": "example-configuration-name", + "updated": "` + timestamp + `" +} +` + +var singleConfigWithValuesJSON = ` +{ + "created": "` + timestamp + `", + "datastore_name": "mysql", + "datastore_version_id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "datastore_version_name": "5.6", + "description": "example description", + "id": "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + "instance_count": 0, + "name": "example-configuration-name", + "updated": "` + timestamp + `", + "values": { + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120 + } +} +` + +var ( + ListConfigsJSON = fmt.Sprintf(`{"configurations": [%s]}`, singleConfigJSON) + GetConfigJSON = fmt.Sprintf(`{"configuration": %s}`, singleConfigJSON) + CreateConfigJSON = fmt.Sprintf(`{"configuration": %s}`, singleConfigWithValuesJSON) +) + +var CreateReq = ` +{ + "configuration": { + "datastore": { + "type": "a00000a0-00a0-0a00-00a0-000a000000aa", + "version": "b00000b0-00b0-0b00-00b0-000b000000bb" + }, + "description": "example description", + "name": "example-configuration-name", + "values": { + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120 + } + } +} +` + +var UpdateReq = ` +{ + "configuration": { + "values": { + "connect_timeout": 300 + } + } +} +` + +var ListInstancesJSON = ` +{ + "instances": [ + { + "id": "d4603f69-ec7e-4e9b-803f-600b9205576f", + "name": "json_rack_instance" + } + ] +} +` + +var ListParamsJSON = ` +{ + "configuration-parameters": [ + { + "max": 1, + "min": 0, + "name": "innodb_file_per_table", + "restart_required": true, + "type": "integer" + }, + { + "max": 4294967296, + "min": 0, + "name": "key_buffer_size", + "restart_required": false, + "type": "integer" + }, + { + "max": 65535, + "min": 2, + "name": "connect_timeout", + "restart_required": false, + "type": "integer" + }, + { + "max": 4294967296, + "min": 0, + "name": "join_buffer_size", + "restart_required": false, + "type": "integer" + } + ] +} +` + +var GetParamJSON = ` +{ + "max": 1, + "min": 0, + "name": "innodb_file_per_table", + "restart_required": true, + "type": "integer" +} +` + +var 
ExampleConfig = configurations.Config{ + Created: timeVal, + DatastoreName: "mysql", + DatastoreVersionID: "b00000b0-00b0-0b00-00b0-000b000000bb", + DatastoreVersionName: "5.6", + Description: "example_description", + ID: "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + Name: "example-configuration-name", + Updated: timeVal, +} + +var ExampleConfigWithValues = configurations.Config{ + Created: timeVal, + DatastoreName: "mysql", + DatastoreVersionID: "b00000b0-00b0-0b00-00b0-000b000000bb", + DatastoreVersionName: "5.6", + Description: "example description", + ID: "005a8bb7-a8df-40ee-b0b7-fc144641abc2", + Name: "example-configuration-name", + Updated: timeVal, + Values: map[string]interface{}{ + "collation_server": "latin1_swedish_ci", + "connect_timeout": float64(120), + }, +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/requests_test.go new file mode 100644 index 000000000000..643f3634264b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/testing/requests_test.go @@ -0,0 +1,237 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/db/v1/configurations" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" + "github.com/gophercloud/gophercloud/testhelper/fixture" +) + +var ( + configID = "{configID}" + _baseURL = "/configurations" + resURL = _baseURL + "/" + configID + + dsID = "{datastoreID}" + versionID = "{versionID}" + paramID = "{paramID}" + dsParamListURL = "/datastores/" + dsID + "/versions/" + versionID + "/parameters" + dsParamGetURL = "/datastores/" + dsID + "/versions/" + versionID + "/parameters/" + paramID + globalParamListURL = "/datastores/versions/" + versionID + "/parameters" + globalParamGetURL = "/datastores/versions/" + versionID + "/parameters/" + paramID +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _baseURL, "GET", "", ListConfigsJSON, 200) + + count := 0 + err := configurations.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := configurations.ExtractConfigs(page) + th.AssertNoErr(t, err) + + expected := []configurations.Config{ExampleConfig} + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertEquals(t, 1, count) + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "GET", "", GetConfigJSON, 200) + + config, err := configurations.Get(fake.ServiceClient(), configID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleConfig, config) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, _baseURL, "POST", CreateReq, CreateConfigJSON, 200) + + opts := configurations.CreateOpts{ + Datastore: &configurations.DatastoreOpts{ + Type: "a00000a0-00a0-0a00-00a0-000a000000aa", + Version: "b00000b0-00b0-0b00-00b0-000b000000bb", + }, + Description: "example description", + Name: "example-configuration-name", + Values: map[string]interface{}{ + "collation_server": "latin1_swedish_ci", + "connect_timeout": 120, + }, + } + + config, err := configurations.Create(fake.ServiceClient(), opts).Extract() + 
th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleConfigWithValues, config) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PATCH", UpdateReq, "", 200) + + opts := configurations.UpdateOpts{ + Values: map[string]interface{}{ + "connect_timeout": 300, + }, + } + + err := configurations.Update(fake.ServiceClient(), configID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestReplace(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "PUT", UpdateReq, "", 202) + + opts := configurations.UpdateOpts{ + Values: map[string]interface{}{ + "connect_timeout": 300, + }, + } + + err := configurations.Replace(fake.ServiceClient(), configID, opts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL, "DELETE", "", "", 202) + + err := configurations.Delete(fake.ServiceClient(), configID).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListInstances(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, resURL+"/instances", "GET", "", ListInstancesJSON, 200) + + expectedInstance := instances.Instance{ + ID: "d4603f69-ec7e-4e9b-803f-600b9205576f", + Name: "json_rack_instance", + } + + pages := 0 + err := configurations.ListInstances(fake.ServiceClient(), configID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := instances.ExtractInstances(page) + if err != nil { + return false, err + } + + th.AssertDeepEquals(t, actual, []instances.Instance{expectedInstance}) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestListDSParams(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, dsParamListURL, "GET", "", ListParamsJSON, 200) + + pages := 0 + err := configurations.ListDatastoreParams(fake.ServiceClient(), dsID, versionID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := configurations.ExtractParams(page) + if err != nil { + return false, err + } + + expected := []configurations.Param{ + {Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer"}, + {Max: 4294967296, Min: 0, Name: "key_buffer_size", RestartRequired: false, Type: "integer"}, + {Max: 65535, Min: 2, Name: "connect_timeout", RestartRequired: false, Type: "integer"}, + {Max: 4294967296, Min: 0, Name: "join_buffer_size", RestartRequired: false, Type: "integer"}, + } + + th.AssertDeepEquals(t, actual, expected) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetDSParam(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, dsParamGetURL, "GET", "", GetParamJSON, 200) + + param, err := configurations.GetDatastoreParam(fake.ServiceClient(), dsID, versionID, paramID).Extract() + th.AssertNoErr(t, err) + + expected := &configurations.Param{ + Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer", + } + + th.AssertDeepEquals(t, expected, param) +} + +func TestListGlobalParams(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, globalParamListURL, "GET", "", ListParamsJSON, 200) + + pages := 0 + err := configurations.ListGlobalParams(fake.ServiceClient(), versionID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := configurations.ExtractParams(page) + if err != 
nil {
+			return false, err
+		}
+
+		expected := []configurations.Param{
+			{Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer"},
+			{Max: 4294967296, Min: 0, Name: "key_buffer_size", RestartRequired: false, Type: "integer"},
+			{Max: 65535, Min: 2, Name: "connect_timeout", RestartRequired: false, Type: "integer"},
+			{Max: 4294967296, Min: 0, Name: "join_buffer_size", RestartRequired: false, Type: "integer"},
+		}
+
+		th.AssertDeepEquals(t, actual, expected)
+
+		return true, nil
+	})
+
+	th.AssertNoErr(t, err)
+	th.AssertEquals(t, 1, pages)
+}
+
+func TestGetGlobalParam(t *testing.T) {
+	th.SetupHTTP()
+	defer th.TeardownHTTP()
+	fixture.SetupHandler(t, globalParamGetURL, "GET", "", GetParamJSON, 200)
+
+	param, err := configurations.GetGlobalParam(fake.ServiceClient(), versionID, paramID).Extract()
+	th.AssertNoErr(t, err)
+
+	expected := &configurations.Param{
+		Max: 1, Min: 0, Name: "innodb_file_per_table", RestartRequired: true, Type: "integer",
+	}
+
+	th.AssertDeepEquals(t, expected, param)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/urls.go
new file mode 100644
index 000000000000..0a69253a72d8
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/configurations/urls.go
@@ -0,0 +1,31 @@
+package configurations
+
+import "github.com/gophercloud/gophercloud"
+
+func baseURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("configurations")
+}
+
+func resourceURL(c *gophercloud.ServiceClient, configID string) string {
+	return c.ServiceURL("configurations", configID)
+}
+
+func instancesURL(c *gophercloud.ServiceClient, configID string) string {
+	return c.ServiceURL("configurations", configID, "instances")
+}
+
+func listDSParamsURL(c *gophercloud.ServiceClient, datastoreID, versionID string) string {
+	return c.ServiceURL("datastores", datastoreID, "versions", versionID, "parameters")
+}
+
+func getDSParamURL(c *gophercloud.ServiceClient, datastoreID, versionID, paramID string) string {
+	return c.ServiceURL("datastores", datastoreID, "versions", versionID, "parameters", paramID)
+}
+
+func listGlobalParamsURL(c *gophercloud.ServiceClient, versionID string) string {
+	return c.ServiceURL("datastores", "versions", versionID, "parameters")
+}
+
+func getGlobalParamURL(c *gophercloud.ServiceClient, versionID, paramID string) string {
+	return c.ServiceURL("datastores", "versions", versionID, "parameters", paramID)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/doc.go
new file mode 100644
index 000000000000..15275fe5d7d0
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/doc.go
@@ -0,0 +1,6 @@
+// Package databases provides information and interaction with the database API
+// resource in the OpenStack Database service.
+//
+// A database, when referred to here, refers to the database engine running on
+// an instance.
+package databases diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/requests.go new file mode 100644 index 000000000000..ef5394f9c687 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/requests.go @@ -0,0 +1,89 @@ +package databases + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder builds create options +type CreateOptsBuilder interface { + ToDBCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the struct responsible for configuring a database; often in +// the context of an instance. +type CreateOpts struct { + // Specifies the name of the database. Valid names can be composed + // of the following characters: letters (either case); numbers; these + // characters '@', '?', '#', ' ' but NEVER beginning a name string; '_' is + // permitted anywhere. Prohibited characters that are forbidden include: + // single quotes, double quotes, back quotes, semicolons, commas, backslashes, + // and forward slashes. + Name string `json:"name" required:"true"` + // Set of symbols and encodings. The default character set is + // "utf8". See http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html for + // supported character sets. + CharSet string `json:"character_set,omitempty"` + // Set of rules for comparing characters in a character set. The + // default value for collate is "utf8_general_ci". See + // http://dev.mysql.com/doc/refman/5.1/en/charset-mysql.html for supported + // collations. + Collate string `json:"collate,omitempty"` +} + +// ToMap is a helper function to convert individual DB create opt structures +// into sub-maps. +func (opts CreateOpts) ToMap() (map[string]interface{}, error) { + if len(opts.Name) > 64 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "databases.CreateOpts.Name" + err.Value = opts.Name + err.Info = "Must be less than 64 chars long" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "") +} + +// BatchCreateOpts allows for multiple databases to created and modified. +type BatchCreateOpts []CreateOpts + +// ToDBCreateMap renders a JSON map for creating DBs. +func (opts BatchCreateOpts) ToDBCreateMap() (map[string]interface{}, error) { + dbs := make([]map[string]interface{}, len(opts)) + for i, db := range opts { + dbMap, err := db.ToMap() + if err != nil { + return nil, err + } + dbs[i] = dbMap + } + return map[string]interface{}{"databases": dbs}, nil +} + +// Create will create a new database within the specified instance. If the +// specified instance does not exist, a 404 error will be returned. +func Create(client *gophercloud.ServiceClient, instanceID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToDBCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client, instanceID), &b, nil, nil) + return +} + +// List will list all of the databases for a specified instance. Note: this +// operation will only return user-defined databases; it will exclude system +// databases like "mysql", "information_schema", "lost+found" etc. 
+func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + return pagination.NewPager(client, baseURL(client, instanceID), func(r pagination.PageResult) pagination.Page { + return DBPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete will permanently delete the database within a specified instance. +// All contained data inside the database will also be permanently deleted. +func Delete(client *gophercloud.ServiceClient, instanceID, dbName string) (r DeleteResult) { + _, r.Err = client.Delete(dbURL(client, instanceID, dbName), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/results.go new file mode 100644 index 000000000000..0479d0e6eb70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/results.go @@ -0,0 +1,63 @@ +package databases + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Database represents a Database API resource. +type Database struct { + // Specifies the name of the MySQL database. + Name string + + // Set of symbols and encodings. The default character set is utf8. + CharSet string + + // Set of rules for comparing characters in a character set. The default + // value for collate is utf8_general_ci. + Collate string +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// DBPage represents a single page of a paginated DB collection. +type DBPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page DBPage) IsEmpty() (bool, error) { + dbs, err := ExtractDBs(page) + return len(dbs) == 0, err +} + +// NextPageURL will retrieve the next page URL. +func (page DBPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"databases_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractDBs will convert a generic pagination struct into a more +// relevant slice of DB structs. 
+func ExtractDBs(page pagination.Page) ([]Database, error) { + r := page.(DBPage) + var s struct { + Databases []Database `json:"databases"` + } + err := r.ExtractInto(&s) + return s.Databases, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/doc.go new file mode 100644 index 000000000000..abdfab98b512 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/doc.go @@ -0,0 +1,2 @@ +// db_databases_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/fixtures.go new file mode 100644 index 000000000000..02b9ecc2a33e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/fixtures.go @@ -0,0 +1,61 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/testhelper/fixture" +) + +var ( + instanceID = "{instanceID}" + resURL = "/instances/" + instanceID + "/databases" +) + +var createDBsReq = ` +{ + "databases": [ + { + "character_set": "utf8", + "collate": "utf8_general_ci", + "name": "testingdb" + }, + { + "name": "sampledb" + } + ] +} +` + +var listDBsResp = ` +{ + "databases": [ + { + "name": "anotherexampledb" + }, + { + "name": "exampledb" + }, + { + "name": "nextround" + }, + { + "name": "sampledb" + }, + { + "name": "testingdb" + } + ] +} +` + +func HandleCreate(t *testing.T) { + fixture.SetupHandler(t, resURL, "POST", createDBsReq, "", 202) +} + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, resURL, "GET", "", listDBsResp, 200) +} + +func HandleDelete(t *testing.T) { + fixture.SetupHandler(t, resURL+"/{dbName}", "DELETE", "", "", 202) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/requests_test.go new file mode 100644 index 000000000000..a470ffa899f5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/testing/requests_test.go @@ -0,0 +1,67 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreate(t) + + opts := databases.BatchCreateOpts{ + databases.CreateOpts{Name: "testingdb", CharSet: "utf8", Collate: "utf8_general_ci"}, + databases.CreateOpts{Name: "sampledb"}, + } + + res := databases.Create(fake.ServiceClient(), instanceID, opts) + th.AssertNoErr(t, res.Err) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + expectedDBs := []databases.Database{ + {Name: "anotherexampledb"}, + {Name: "exampledb"}, + {Name: "nextround"}, + {Name: "sampledb"}, + {Name: "testingdb"}, + } + + pages := 0 + err := databases.List(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := databases.ExtractDBs(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedDBs, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func 
TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDelete(t) + + err := databases.Delete(fake.ServiceClient(), instanceID, "{dbName}").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/urls.go new file mode 100644 index 000000000000..aba42c9c879d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/databases/urls.go @@ -0,0 +1,11 @@ +package databases + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, instanceID string) string { + return c.ServiceURL("instances", instanceID, "databases") +} + +func dbURL(c *gophercloud.ServiceClient, instanceID, dbName string) string { + return c.ServiceURL("instances", instanceID, "databases", dbName) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/doc.go new file mode 100644 index 000000000000..ae14026b5258 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/doc.go @@ -0,0 +1,3 @@ +// Package datastores provides information and interaction with the datastore +// API resource in the Rackspace Database service. +package datastores diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/requests.go new file mode 100644 index 000000000000..134e30907687 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/requests.go @@ -0,0 +1,33 @@ +package datastores + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will list all available datastore types that instances can use. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page { + return DatastorePage{pagination.SinglePageBase(r)} + }) +} + +// Get will retrieve the details of a specified datastore type. +func Get(client *gophercloud.ServiceClient, datastoreID string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, datastoreID), &r.Body, nil) + return +} + +// ListVersions will list all of the available versions for a specified +// datastore type. +func ListVersions(client *gophercloud.ServiceClient, datastoreID string) pagination.Pager { + return pagination.NewPager(client, versionsURL(client, datastoreID), func(r pagination.PageResult) pagination.Page { + return VersionPage{pagination.SinglePageBase(r)} + }) +} + +// GetVersion will retrieve the details of a specified datastore version. +func GetVersion(client *gophercloud.ServiceClient, datastoreID, versionID string) (r GetVersionResult) { + _, r.Err = client.Get(versionURL(client, datastoreID, versionID), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/results.go new file mode 100644 index 000000000000..a6e27d274549 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/results.go @@ -0,0 +1,100 @@ +package datastores + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Version represents a version API resource. 
Multiple versions belong to a Datastore. +type Version struct { + ID string + Links []gophercloud.Link + Name string +} + +// Datastore represents a Datastore API resource. +type Datastore struct { + DefaultVersion string `json:"default_version"` + ID string + Links []gophercloud.Link + Name string + Versions []Version +} + +// DatastorePartial is a meta structure which is used in various API responses. +// It is a lightweight and truncated version of a full Datastore resource, +// offering details of the Version, Type and VersionID only. +type DatastorePartial struct { + Version string + Type string + VersionID string `json:"version_id"` +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// GetVersionResult represents the result of getting a version. +type GetVersionResult struct { + gophercloud.Result +} + +// DatastorePage represents a page of datastore resources. +type DatastorePage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a Datastore collection is empty. +func (r DatastorePage) IsEmpty() (bool, error) { + is, err := ExtractDatastores(r) + return len(is) == 0, err +} + +// ExtractDatastores retrieves a slice of datastore structs from a paginated +// collection. +func ExtractDatastores(r pagination.Page) ([]Datastore, error) { + var s struct { + Datastores []Datastore `json:"datastores"` + } + err := (r.(DatastorePage)).ExtractInto(&s) + return s.Datastores, err +} + +// Extract retrieves a single Datastore struct from an operation result. +func (r GetResult) Extract() (*Datastore, error) { + var s struct { + Datastore *Datastore `json:"datastore"` + } + err := r.ExtractInto(&s) + return s.Datastore, err +} + +// VersionPage represents a page of version resources. +type VersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty indicates whether a collection of version resources is empty. +func (r VersionPage) IsEmpty() (bool, error) { + is, err := ExtractVersions(r) + return len(is) == 0, err +} + +// ExtractVersions retrieves a slice of versions from a paginated collection. +func ExtractVersions(r pagination.Page) ([]Version, error) { + var s struct { + Versions []Version `json:"versions"` + } + err := (r.(VersionPage)).ExtractInto(&s) + return s.Versions, err +} + +// Extract retrieves a single Version struct from an operation result. 
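+//
+// For example (sketch; "client", "dsID" and "versionID" are placeholders):
+//
+//	version, err := datastores.GetVersion(client, dsID, versionID).Extract()
+//	if err == nil {
+//		// version.ID, version.Name and version.Links are now populated
+//	}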
+func (r GetVersionResult) Extract() (*Version, error) { + var s struct { + Version *Version `json:"version"` + } + err := r.ExtractInto(&s) + return s.Version, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/doc.go new file mode 100644 index 000000000000..8f06f86c1d1c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/doc.go @@ -0,0 +1,2 @@ +// db_datastores_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/fixtures.go new file mode 100644 index 000000000000..3b82646342e6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/fixtures.go @@ -0,0 +1,101 @@ +package testing + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/datastores" +) + +const version1JSON = ` +{ + "id": "b00000b0-00b0-0b00-00b0-000b000000bb", + "links": [ + { + "href": "https://10.240.28.38:8779/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", + "rel": "self" + }, + { + "href": "https://10.240.28.38:8779/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb", + "rel": "bookmark" + } + ], + "name": "5.1" +} +` + +const version2JSON = ` +{ + "id": "c00000b0-00c0-0c00-00c0-000b000000cc", + "links": [ + { + "href": "https://10.240.28.38:8779/v1.0/1234/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc", + "rel": "self" + }, + { + "href": "https://10.240.28.38:8779/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc", + "rel": "bookmark" + } + ], + "name": "5.2" +} +` + +var versionsJSON = fmt.Sprintf(`"versions": [%s, %s]`, version1JSON, version2JSON) + +var singleDSJSON = fmt.Sprintf(` +{ + "default_version": "c00000b0-00c0-0c00-00c0-000b000000cc", + "id": "10000000-0000-0000-0000-000000000001", + "links": [ + { + "href": "https://10.240.28.38:8779/v1.0/1234/datastores/10000000-0000-0000-0000-000000000001", + "rel": "self" + }, + { + "href": "https://10.240.28.38:8779/datastores/10000000-0000-0000-0000-000000000001", + "rel": "bookmark" + } + ], + "name": "mysql", + %s +} +`, versionsJSON) + +var ( + ListDSResp = fmt.Sprintf(`{"datastores":[%s]}`, singleDSJSON) + GetDSResp = fmt.Sprintf(`{"datastore":%s}`, singleDSJSON) + ListVersionsResp = fmt.Sprintf(`{%s}`, versionsJSON) + GetVersionResp = fmt.Sprintf(`{"version":%s}`, version1JSON) +) + +var ExampleVersion1 = datastores.Version{ + ID: "b00000b0-00b0-0b00-00b0-000b000000bb", + Links: []gophercloud.Link{ + {Rel: "self", Href: "https://10.240.28.38:8779/v1.0/1234/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb"}, + {Rel: "bookmark", Href: "https://10.240.28.38:8779/datastores/versions/b00000b0-00b0-0b00-00b0-000b000000bb"}, + }, + Name: "5.1", +} + +var exampleVersion2 = datastores.Version{ + ID: "c00000b0-00c0-0c00-00c0-000b000000cc", + Links: []gophercloud.Link{ + {Rel: "self", Href: "https://10.240.28.38:8779/v1.0/1234/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc"}, + {Rel: "bookmark", Href: "https://10.240.28.38:8779/datastores/versions/c00000b0-00c0-0c00-00c0-000b000000cc"}, + }, + Name: "5.2", +} + +var ExampleVersions = []datastores.Version{ExampleVersion1, exampleVersion2} + +var ExampleDatastore = datastores.Datastore{ + DefaultVersion: "c00000b0-00c0-0c00-00c0-000b000000cc", + 
ID: "10000000-0000-0000-0000-000000000001", + Links: []gophercloud.Link{ + {Rel: "self", Href: "https://10.240.28.38:8779/v1.0/1234/datastores/10000000-0000-0000-0000-000000000001"}, + {Rel: "bookmark", Href: "https://10.240.28.38:8779/datastores/10000000-0000-0000-0000-000000000001"}, + }, + Name: "mysql", + Versions: ExampleVersions, +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/requests_test.go new file mode 100644 index 000000000000..b505726d3f03 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/testing/requests_test.go @@ -0,0 +1,79 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/db/v1/datastores" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" + "github.com/gophercloud/gophercloud/testhelper/fixture" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores", "GET", "", ListDSResp, 200) + + pages := 0 + + err := datastores.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := datastores.ExtractDatastores(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, []datastores.Datastore{ExampleDatastore}, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}", "GET", "", GetDSResp, 200) + + ds, err := datastores.Get(fake.ServiceClient(), "{dsID}").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleDatastore, ds) +} + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}/versions", "GET", "", ListVersionsResp, 200) + + pages := 0 + + err := datastores.ListVersions(fake.ServiceClient(), "{dsID}").EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := datastores.ExtractVersions(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, ExampleVersions, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetVersion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + fixture.SetupHandler(t, "/datastores/{dsID}/versions/{versionID}", "GET", "", GetVersionResp, 200) + + ds, err := datastores.GetVersion(fake.ServiceClient(), "{dsID}", "{versionID}").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &ExampleVersion1, ds) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/urls.go new file mode 100644 index 000000000000..06d1b3daec98 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/datastores/urls.go @@ -0,0 +1,19 @@ +package datastores + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("datastores") +} + +func resourceURL(c *gophercloud.ServiceClient, dsID string) string { + return c.ServiceURL("datastores", dsID) +} + +func versionsURL(c *gophercloud.ServiceClient, dsID string) string { + return c.ServiceURL("datastores", dsID, "versions") +} + +func 
versionURL(c *gophercloud.ServiceClient, dsID, versionID string) string { + return c.ServiceURL("datastores", dsID, "versions", versionID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/doc.go new file mode 100644 index 000000000000..4d281d562fff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/doc.go @@ -0,0 +1,7 @@ +// Package flavors provides information and interaction with the flavor API +// resource in the OpenStack Database service. +// +// A flavor is an available hardware configuration for a database instance. +// Each flavor has a unique combination of disk space, memory capacity and +// priority for CPU time. +package flavors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/requests.go new file mode 100644 index 000000000000..7fac56ae601b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/requests.go @@ -0,0 +1,21 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will list all available hardware flavors that an instance can use. The +// operation is identical to the one supported by the Nova API, but without the +// "disk" property. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return FlavorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get will retrieve information for a specified hardware flavor. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/results.go new file mode 100644 index 000000000000..0ba515ce3efb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/results.go @@ -0,0 +1,71 @@ +package flavors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// GetResult temporarily holds the response from a Get call. +type GetResult struct { + gophercloud.Result +} + +// Extract provides access to the individual Flavor returned by the Get function. +func (r GetResult) Extract() (*Flavor, error) { + var s struct { + Flavor *Flavor `json:"flavor"` + } + err := r.ExtractInto(&s) + return s.Flavor, err +} + +// Flavor records represent (virtual) hardware configurations for server resources in a region. +type Flavor struct { + // The flavor's unique identifier. + // Contains 0 if the ID is not an integer. + ID int `json:"id"` + + // The RAM capacity for the flavor. + RAM int `json:"ram"` + + // The Name field provides a human-readable moniker for the flavor. + Name string `json:"name"` + + // Links to access the flavor. + Links []gophercloud.Link + + // The flavor's unique identifier as a string + StrID string `json:"str_id"` +} + +// FlavorPage contains a single page of the response from a List call. +type FlavorPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines if a page contains any results. 
+func (page FlavorPage) IsEmpty() (bool, error) { + flavors, err := ExtractFlavors(page) + return len(flavors) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the next page of results. +func (page FlavorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"flavors_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractFlavors provides access to the list of flavors in a page acquired from the List operation. +func ExtractFlavors(r pagination.Page) ([]Flavor, error) { + var s struct { + Flavors []Flavor `json:"flavors"` + } + err := (r.(FlavorPage)).ExtractInto(&s) + return s.Flavors, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/doc.go new file mode 100644 index 000000000000..08092661c571 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/doc.go @@ -0,0 +1,2 @@ +// db_flavors_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/fixtures.go new file mode 100644 index 000000000000..9c323b80c7aa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/fixtures.go @@ -0,0 +1,52 @@ +package testing + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud/testhelper/fixture" +) + +const flavor = ` +{ + "id": %s, + "links": [ + { + "href": "https://openstack.example.com/v1.0/1234/flavors/%s", + "rel": "self" + }, + { + "href": "https://openstack.example.com/flavors/%s", + "rel": "bookmark" + } + ], + "name": "%s", + "ram": %d, + "str_id": "%s" +} +` + +var ( + flavorID = "{flavorID}" + _baseURL = "/flavors" + resURL = "/flavors/" + flavorID +) + +var ( + flavor1 = fmt.Sprintf(flavor, "1", "1", "1", "m1.tiny", 512, "1") + flavor2 = fmt.Sprintf(flavor, "2", "2", "2", "m1.small", 1024, "2") + flavor3 = fmt.Sprintf(flavor, "3", "3", "3", "m1.medium", 2048, "3") + flavor4 = fmt.Sprintf(flavor, "4", "4", "4", "m1.large", 4096, "4") + flavor5 = fmt.Sprintf(flavor, "null", "d1", "d1", "ds512M", 512, "d1") + + listFlavorsResp = fmt.Sprintf(`{"flavors":[%s, %s, %s, %s, %s]}`, flavor1, flavor2, flavor3, flavor4, flavor5) + getFlavorResp = fmt.Sprintf(`{"flavor": %s}`, flavor1) +) + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, _baseURL, "GET", "", listFlavorsResp, 200) +} + +func HandleGet(t *testing.T) { + fixture.SetupHandler(t, resURL, "GET", "", getFlavorResp, 200) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/requests_test.go new file mode 100644 index 000000000000..e8b580aef6b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/testing/requests_test.go @@ -0,0 +1,107 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/flavors" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListFlavors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + pages := 0 + err := 
flavors.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := flavors.ExtractFlavors(page) + if err != nil { + return false, err + } + + expected := []flavors.Flavor{ + { + ID: 1, + Name: "m1.tiny", + RAM: 512, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + {Href: "https://openstack.example.com/flavors/1", Rel: "bookmark"}, + }, + StrID: "1", + }, + { + ID: 2, + Name: "m1.small", + RAM: 1024, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/2", Rel: "self"}, + {Href: "https://openstack.example.com/flavors/2", Rel: "bookmark"}, + }, + StrID: "2", + }, + { + ID: 3, + Name: "m1.medium", + RAM: 2048, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/3", Rel: "self"}, + {Href: "https://openstack.example.com/flavors/3", Rel: "bookmark"}, + }, + StrID: "3", + }, + { + ID: 4, + Name: "m1.large", + RAM: 4096, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/4", Rel: "self"}, + {Href: "https://openstack.example.com/flavors/4", Rel: "bookmark"}, + }, + StrID: "4", + }, + { + ID: 0, + Name: "ds512M", + RAM: 512, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/d1", Rel: "self"}, + {Href: "https://openstack.example.com/flavors/d1", Rel: "bookmark"}, + }, + StrID: "d1", + }, + } + + th.AssertDeepEquals(t, expected, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetFlavor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGet(t) + + actual, err := flavors.Get(fake.ServiceClient(), flavorID).Extract() + th.AssertNoErr(t, err) + + expected := &flavors.Flavor{ + ID: 1, + Name: "m1.tiny", + RAM: 512, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + }, + StrID: "1", + } + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/urls.go new file mode 100644 index 000000000000..a24301b18651 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/flavors/urls.go @@ -0,0 +1,11 @@ +package flavors + +import "github.com/gophercloud/gophercloud" + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("flavors", id) +} + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("flavors") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/doc.go new file mode 100644 index 000000000000..dc5c90f9551b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/doc.go @@ -0,0 +1,7 @@ +// Package instances provides information and interaction with the instance API +// resource in the OpenStack Database service. +// +// A database instance is an isolated database environment with compute and +// storage resources in a single tenant environment on a shared physical host +// machine. 
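+//
+// A typical lifecycle, sketched with illustrative values only, is to create
+// an instance, poll it with Get (the Status field reports its build state),
+// and eventually delete it:
+//
+//	inst, err := instances.Create(client, instances.CreateOpts{
+//		Name:      "example",
+//		FlavorRef: "1",
+//		Size:      2,
+//	}).Extract()
+//	// ... later ...
+//	inst, err = instances.Get(client, inst.ID).Extract()
+//	// ... and eventually ...
+//	err = instances.Delete(client, inst.ID).ExtractErr()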
+package instances diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/requests.go new file mode 100644 index 000000000000..bbd909318203 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/requests.go @@ -0,0 +1,219 @@ +package instances + +import ( + "github.com/gophercloud/gophercloud" + db "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/openstack/db/v1/users" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder is the top-level interface for create options. +type CreateOptsBuilder interface { + ToInstanceCreateMap() (map[string]interface{}, error) +} + +// DatastoreOpts represents the configuration for how an instance stores data. +type DatastoreOpts struct { + Version string `json:"version"` + Type string `json:"type"` +} + +// ToMap converts a DatastoreOpts to a map[string]string (for a request body) +func (opts DatastoreOpts) ToMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// NetworkOpts is used within CreateOpts to control a new server's network attachments. +type NetworkOpts struct { + // UUID of a nova-network to attach to the newly provisioned server. + // Required unless Port is provided. + UUID string `json:"net-id,omitempty"` + + // Port of a neutron network to attach to the newly provisioned server. + // Required unless UUID is provided. + Port string `json:"port-id,omitempty"` + + // V4FixedIP [optional] specifies a fixed IPv4 address to be used on this network. + V4FixedIP string `json:"v4-fixed-ip,omitempty"` + + // V6FixedIP [optional] specifies a fixed IPv6 address to be used on this network. + V6FixedIP string `json:"v6-fixed-ip,omitempty"` +} + +// ToMap converts a NetworkOpts to a map[string]string (for a request body) +func (opts NetworkOpts) ToMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// CreateOpts is the struct responsible for configuring a new database instance. +type CreateOpts struct { + // Either the integer UUID (in string form) of the flavor, or its URI + // reference as specified in the response from the List() call. Required. + FlavorRef string + // Specifies the volume size in gigabytes (GB). The value must be between 1 + // and 300. Required. + Size int + // Name of the instance to create. The length of the name is limited to + // 255 characters and any characters are permitted. Optional. + Name string + // A slice of database information options. + Databases db.CreateOptsBuilder + // A slice of user information options. + Users users.CreateOptsBuilder + // Options to configure the type of datastore the instance will use. This is + // optional, and if excluded will default to MySQL. + Datastore *DatastoreOpts + // Networks dictates how this server will be attached to available networks. + Networks []NetworkOpts +} + +// ToInstanceCreateMap will render a JSON map. 
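+//
+// As an illustration (field values are hypothetical), an opts value of
+//
+//	instances.CreateOpts{Name: "example", FlavorRef: "1", Size: 2}
+//
+// renders to a request body of the form
+//
+//	{"instance": {"flavorRef": "1", "name": "example", "volume": {"size": 2}}}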
+func (opts CreateOpts) ToInstanceCreateMap() (map[string]interface{}, error) { + if opts.Size > 300 || opts.Size < 1 { + err := gophercloud.ErrInvalidInput{} + err.Argument = "instances.CreateOpts.Size" + err.Value = opts.Size + err.Info = "Size (GB) must be between 1-300" + return nil, err + } + + if opts.FlavorRef == "" { + return nil, gophercloud.ErrMissingInput{Argument: "instances.CreateOpts.FlavorRef"} + } + + instance := map[string]interface{}{ + "volume": map[string]int{"size": opts.Size}, + "flavorRef": opts.FlavorRef, + } + + if opts.Name != "" { + instance["name"] = opts.Name + } + if opts.Databases != nil { + dbs, err := opts.Databases.ToDBCreateMap() + if err != nil { + return nil, err + } + instance["databases"] = dbs["databases"] + } + if opts.Users != nil { + users, err := opts.Users.ToUserCreateMap() + if err != nil { + return nil, err + } + instance["users"] = users["users"] + } + if opts.Datastore != nil { + datastore, err := opts.Datastore.ToMap() + if err != nil { + return nil, err + } + instance["datastore"] = datastore + } + + if len(opts.Networks) > 0 { + networks := make([]map[string]interface{}, len(opts.Networks)) + for i, net := range opts.Networks { + var err error + networks[i], err = net.ToMap() + if err != nil { + return nil, err + } + } + instance["nics"] = networks + } + + return map[string]interface{}{"instance": instance}, nil +} + +// Create asynchronously provisions a new database instance. It requires the +// user to specify a flavor and a volume size. The API service then provisions +// the instance with the requested flavor and sets up a volume of the specified +// size, which is the storage for the database instance. +// +// Although this call only allows the creation of 1 instance per request, you +// can create an instance with multiple databases and users. The default +// binding for a MySQL instance is port 3306. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToInstanceCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} + +// List retrieves the status and information for all database instances. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, baseURL(client), func(r pagination.PageResult) pagination.Page { + return InstancePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves the status and information for a specified database instance. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(resourceURL(client, id), &r.Body, nil) + return +} + +// Delete permanently destroys the database instance. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(resourceURL(client, id), nil) + return +} + +// EnableRootUser enables the login from any host for the root user and +// provides the user with a generated root password. +func EnableRootUser(client *gophercloud.ServiceClient, id string) (r EnableRootUserResult) { + _, r.Err = client.Post(userRootURL(client, id), nil, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}}) + return +} + +// IsRootEnabled checks an instance to see if root access is enabled. It returns +// True if root user is enabled for the specified database instance or False +// otherwise. 
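+//
+// For example (sketch; "client" and "id" are placeholders):
+//
+//	enabled, err := instances.IsRootEnabled(client, id).Extract()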
+func IsRootEnabled(client *gophercloud.ServiceClient, id string) (r IsRootEnabledResult) { + _, r.Err = client.Get(userRootURL(client, id), &r.Body, nil) + return +} + +// Restart will restart only the MySQL Instance. Restarting MySQL will +// erase any dynamic configuration settings that you have made within MySQL. +// The MySQL service will be unavailable until the instance restarts. +func Restart(client *gophercloud.ServiceClient, id string) (r ActionResult) { + b := map[string]interface{}{"restart": struct{}{}} + _, r.Err = client.Post(actionURL(client, id), &b, nil, nil) + return +} + +// Resize changes the memory size of the instance, assuming a valid +// flavorRef is provided. It will also restart the MySQL service. +func Resize(client *gophercloud.ServiceClient, id, flavorRef string) (r ActionResult) { + b := map[string]interface{}{"resize": map[string]string{"flavorRef": flavorRef}} + _, r.Err = client.Post(actionURL(client, id), &b, nil, nil) + return +} + +// ResizeVolume will resize the attached volume for an instance. It supports +// only increasing the volume size and does not support decreasing the size. +// The volume size is in gigabytes (GB) and must be an integer. +func ResizeVolume(client *gophercloud.ServiceClient, id string, size int) (r ActionResult) { + b := map[string]interface{}{"resize": map[string]interface{}{"volume": map[string]int{"size": size}}} + _, r.Err = client.Post(actionURL(client, id), &b, nil, nil) + return +} + +// AttachConfigurationGroup will attach configuration group to the instance +func AttachConfigurationGroup(client *gophercloud.ServiceClient, instanceID string, configID string) (r ConfigurationResult) { + b := map[string]interface{}{"instance": map[string]interface{}{"configuration": configID}} + _, r.Err = client.Put(resourceURL(client, instanceID), &b, nil, &gophercloud.RequestOpts{OkCodes: []int{202}}) + return +} + +// DetachConfigurationGroup will dettach configuration group from the instance +func DetachConfigurationGroup(client *gophercloud.ServiceClient, instanceID string) (r ConfigurationResult) { + b := map[string]interface{}{"instance": map[string]interface{}{}} + _, r.Err = client.Put(resourceURL(client, instanceID), &b, nil, &gophercloud.RequestOpts{OkCodes: []int{202}}) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/results.go new file mode 100644 index 000000000000..e47a740ce9be --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/results.go @@ -0,0 +1,219 @@ +package instances + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/datastores" + "github.com/gophercloud/gophercloud/openstack/db/v1/users" + "github.com/gophercloud/gophercloud/pagination" +) + +// Volume represents information about an attached volume for a database instance. +type Volume struct { + // The size in GB of the volume + Size int + + Used float64 +} + +// Flavor represents (virtual) hardware configurations for server resources in a region. +type Flavor struct { + // The flavor's unique identifier. + ID string + // Links to access the flavor. 
+ Links []gophercloud.Link +} + +// Fault describes the fault reason in more detail when a database instance has errored +type Fault struct { + // Indicates the time when the fault occured + Created time.Time `json:"-"` + + // A message describing the fault reason + Message string + + // More details about the fault, for example a stack trace. Only filled + // in for admin users. + Details string +} + +func (r *Fault) UnmarshalJSON(b []byte) error { + type tmp Fault + var s struct { + tmp + Created gophercloud.JSONRFC3339NoZ `json:"created"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Fault(s.tmp) + + r.Created = time.Time(s.Created) + + return nil +} + +// Instance represents a remote MySQL instance. +type Instance struct { + // Indicates the datetime that the instance was created + Created time.Time `json:"-"` + + // Indicates the most recent datetime that the instance was updated. + Updated time.Time `json:"-"` + + // Indicates the hardware flavor the instance uses. + Flavor Flavor + + // A DNS-resolvable hostname associated with the database instance (rather + // than an IPv4 address). Since the hostname always resolves to the correct + // IP address of the database instance, this relieves the user from the task + // of maintaining the mapping. Note that although the IP address may likely + // change on resizing, migrating, and so forth, the hostname always resolves + // to the correct database instance. + Hostname string + + // The IP addresses associated with the database instance + // Is empty if the instance has a hostname + IP []string + + // Indicates the unique identifier for the instance resource. + ID string + + // Exposes various links that reference the instance resource. + Links []gophercloud.Link + + // The human-readable name of the instance. + Name string + + // The build status of the instance. + Status string + + // Fault information (only available when the instance has errored) + Fault *Fault + + // Information about the attached volume of the instance. + Volume Volume + + // Indicates how the instance stores data. + Datastore datastores.DatastorePartial +} + +func (r *Instance) UnmarshalJSON(b []byte) error { + type tmp Instance + var s struct { + tmp + Created gophercloud.JSONRFC3339NoZ `json:"created"` + Updated gophercloud.JSONRFC3339NoZ `json:"updated"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Instance(s.tmp) + + r.Created = time.Time(s.Created) + r.Updated = time.Time(s.Updated) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ConfigurationResult represents the result of a AttachConfigurationGroup/DetachConfigurationGroup operation. +type ConfigurationResult struct { + gophercloud.ErrResult +} + +// Extract will extract an Instance from various result structs. +func (r commonResult) Extract() (*Instance, error) { + var s struct { + Instance *Instance `json:"instance"` + } + err := r.ExtractInto(&s) + return s.Instance, err +} + +// InstancePage represents a single page of a paginated instance collection. +type InstancePage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. 
+func (page InstancePage) IsEmpty() (bool, error) { + instances, err := ExtractInstances(page) + return len(instances) == 0, err +} + +// NextPageURL will retrieve the next page URL. +func (page InstancePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"instances_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractInstances will convert a generic pagination struct into a more +// relevant slice of Instance structs. +func ExtractInstances(r pagination.Page) ([]Instance, error) { + var s struct { + Instances []Instance `json:"instances"` + } + err := (r.(InstancePage)).ExtractInto(&s) + return s.Instances, err +} + +// EnableRootUserResult represents the result of an operation to enable the root user. +type EnableRootUserResult struct { + gophercloud.Result +} + +// Extract will extract root user information from a UserRootResult. +func (r EnableRootUserResult) Extract() (*users.User, error) { + var s struct { + User *users.User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// ActionResult represents the result of action requests, such as: restarting +// an instance service, resizing its memory allocation, and resizing its +// attached volume size. +type ActionResult struct { + gophercloud.ErrResult +} + +// IsRootEnabledResult is the result of a call to IsRootEnabled. To see if +// root is enabled, call the type's Extract method. +type IsRootEnabledResult struct { + gophercloud.Result +} + +// Extract is used to extract the data from a IsRootEnabledResult. +func (r IsRootEnabledResult) Extract() (bool, error) { + return r.Body.(map[string]interface{})["rootEnabled"] == true, r.Err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/doc.go new file mode 100644 index 000000000000..386ac58aef20 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/doc.go @@ -0,0 +1,2 @@ +// db_instances_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/fixtures.go new file mode 100644 index 000000000000..3555a008ae95 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/fixtures.go @@ -0,0 +1,256 @@ +package testing + +import ( + "fmt" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/db/v1/datastores" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" + "github.com/gophercloud/gophercloud/testhelper/fixture" +) + +var ( + timestamp = "2015-11-12T14:22:42" + timeVal, _ = time.Parse(gophercloud.RFC3339NoZ, timestamp) +) + +var instance = ` +{ + "created": "` + timestamp + `", + "datastore": { + "type": "mysql", + "version": "5.6" + }, + "flavor": { + "id": "1", + "links": [ + { + "href": "https://openstack.example.com/v1.0/1234/flavors/1", + "rel": "self" + }, + { + "href": "https://openstack.example.com/v1.0/1234/flavors/1", + "rel": "bookmark" + } + ] + }, + "links": [ + { + "href": "https://openstack.example.com/v1.0/1234/instances/1", + "rel": "self" + } + ], + "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.openstack.example.com", + "id": "{instanceID}", + "name": "json_rack_instance", + "status": "BUILD", + "updated": "` + 
timestamp + `", + "volume": { + "size": 2 + } +} +` + +var createReq = ` +{ + "instance": { + "databases": [ + { + "character_set": "utf8", + "collate": "utf8_general_ci", + "name": "sampledb" + }, + { + "name": "nextround" + } + ], + "flavorRef": "1", + "name": "json_rack_instance", + "users": [ + { + "databases": [ + { + "name": "sampledb" + } + ], + "name": "demouser", + "password": "demopassword" + } + ], + "volume": { + "size": 2 + } + } +} +` + +var instanceWithFault = ` +{ + "created": "` + timestamp + `", + "datastore": { + "type": "mysql", + "version": "5.6" + }, + "flavor": { + "id": "1", + "links": [ + { + "href": "https://openstack.example.com/v1.0/1234/flavors/1", + "rel": "self" + }, + { + "href": "https://openstack.example.com/v1.0/1234/flavors/1", + "rel": "bookmark" + } + ] + }, + "links": [ + { + "href": "https://openstack.example.com/v1.0/1234/instances/1", + "rel": "self" + } + ], + "hostname": "e09ad9a3f73309469cf1f43d11e79549caf9acf2.openstack.example.com", + "id": "{instanceID}", + "name": "json_rack_instance", + "status": "BUILD", + "updated": "` + timestamp + `", + "volume": { + "size": 2 + }, + "fault": { + "message": "some error message", + "created": "` + timestamp + `", + "details": "some details about the error" + } +} +` + +var ( + instanceID = "{instanceID}" + configGroupID = "00000000-0000-0000-0000-000000000000" + rootURL = "/instances" + resURL = rootURL + "/" + instanceID + uRootURL = resURL + "/root" + aURL = resURL + "/action" +) + +var ( + restartReq = `{"restart": {}}` + resizeReq = `{"resize": {"flavorRef": "2"}}` + resizeVolReq = `{"resize": {"volume": {"size": 4}}}` + attachConfigurationGroupReq = `{"instance": {"configuration": "00000000-0000-0000-0000-000000000000"}}` + detachConfigurationGroupReq = `{"instance": {}}` +) + +var ( + createResp = fmt.Sprintf(`{"instance": %s}`, instance) + createWithFaultResp = fmt.Sprintf(`{"instance": %s}`, instanceWithFault) + listInstancesResp = fmt.Sprintf(`{"instances":[%s]}`, instance) + getInstanceResp = createResp + enableUserResp = `{"user":{"name":"root","password":"secretsecret"}}` + isUserEnabledResp = `{"rootEnabled":true}` +) + +var expectedInstance = instances.Instance{ + Created: timeVal, + Updated: timeVal, + Flavor: instances.Flavor{ + ID: "1", + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + {Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "bookmark"}, + }, + }, + Hostname: "e09ad9a3f73309469cf1f43d11e79549caf9acf2.openstack.example.com", + ID: instanceID, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/instances/1", Rel: "self"}, + }, + Name: "json_rack_instance", + Status: "BUILD", + Volume: instances.Volume{Size: 2}, + Datastore: datastores.DatastorePartial{ + Type: "mysql", + Version: "5.6", + }, +} + +var expectedInstanceWithFault = instances.Instance{ + Created: timeVal, + Updated: timeVal, + Flavor: instances.Flavor{ + ID: "1", + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "self"}, + {Href: "https://openstack.example.com/v1.0/1234/flavors/1", Rel: "bookmark"}, + }, + }, + Hostname: "e09ad9a3f73309469cf1f43d11e79549caf9acf2.openstack.example.com", + ID: instanceID, + Links: []gophercloud.Link{ + {Href: "https://openstack.example.com/v1.0/1234/instances/1", Rel: "self"}, + }, + Name: "json_rack_instance", + Status: "BUILD", + Volume: instances.Volume{Size: 2}, + Datastore: datastores.DatastorePartial{ + Type: "mysql", + Version: "5.6", + 
}, + Fault: &instances.Fault{ + Created: timeVal, + Message: "some error message", + Details: "some details about the error", + }, +} + +func HandleCreate(t *testing.T) { + fixture.SetupHandler(t, rootURL, "POST", createReq, createResp, 200) +} + +func HandleCreateWithFault(t *testing.T) { + fixture.SetupHandler(t, rootURL, "POST", createReq, createWithFaultResp, 200) +} + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, rootURL, "GET", "", listInstancesResp, 200) +} + +func HandleGet(t *testing.T) { + fixture.SetupHandler(t, resURL, "GET", "", getInstanceResp, 200) +} + +func HandleDelete(t *testing.T) { + fixture.SetupHandler(t, resURL, "DELETE", "", "", 202) +} + +func HandleEnableRoot(t *testing.T) { + fixture.SetupHandler(t, uRootURL, "POST", "", enableUserResp, 200) +} + +func HandleIsRootEnabled(t *testing.T) { + fixture.SetupHandler(t, uRootURL, "GET", "", isUserEnabledResp, 200) +} + +func HandleRestart(t *testing.T) { + fixture.SetupHandler(t, aURL, "POST", restartReq, "", 202) +} + +func HandleResize(t *testing.T) { + fixture.SetupHandler(t, aURL, "POST", resizeReq, "", 202) +} + +func HandleResizeVol(t *testing.T) { + fixture.SetupHandler(t, aURL, "POST", resizeVolReq, "", 202) +} + +func HandleAttachConfigurationGroup(t *testing.T) { + fixture.SetupHandler(t, resURL, "PUT", attachConfigurationGroupReq, "", 202) +} + +func HandleDetachConfigurationGroup(t *testing.T) { + fixture.SetupHandler(t, resURL, "PUT", detachConfigurationGroupReq, "", 202) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/requests_test.go new file mode 100644 index 000000000000..6e6c4f127ed3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/testing/requests_test.go @@ -0,0 +1,182 @@ +package testing + +import ( + "testing" + + db "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/openstack/db/v1/instances" + "github.com/gophercloud/gophercloud/openstack/db/v1/users" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreate(t) + + opts := instances.CreateOpts{ + Name: "json_rack_instance", + FlavorRef: "1", + Databases: db.BatchCreateOpts{ + {CharSet: "utf8", Collate: "utf8_general_ci", Name: "sampledb"}, + {Name: "nextround"}, + }, + Users: users.BatchCreateOpts{ + { + Name: "demouser", + Password: "demopassword", + Databases: db.BatchCreateOpts{ + {Name: "sampledb"}, + }, + }, + }, + Size: 2, + } + + instance, err := instances.Create(fake.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &expectedInstance, instance) +} + +func TestCreateWithFault(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateWithFault(t) + + opts := instances.CreateOpts{ + Name: "json_rack_instance", + FlavorRef: "1", + Databases: db.BatchCreateOpts{ + {CharSet: "utf8", Collate: "utf8_general_ci", Name: "sampledb"}, + {Name: "nextround"}, + }, + Users: users.BatchCreateOpts{ + { + Name: "demouser", + Password: "demopassword", + Databases: db.BatchCreateOpts{ + {Name: "sampledb"}, + }, + }, + }, + Size: 2, + } + + instance, err := instances.Create(fake.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, 
&expectedInstanceWithFault, instance) +} + +func TestInstanceList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + pages := 0 + err := instances.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := instances.ExtractInstances(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, []instances.Instance{expectedInstance}, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestGetInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGet(t) + + instance, err := instances.Get(fake.ServiceClient(), instanceID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, &expectedInstance, instance) +} + +func TestDeleteInstance(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDelete(t) + + res := instances.Delete(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} + +func TestEnableRootUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleEnableRoot(t) + + expected := &users.User{Name: "root", Password: "secretsecret"} + user, err := instances.EnableRootUser(fake.ServiceClient(), instanceID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expected, user) +} + +func TestIsRootEnabled(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleIsRootEnabled(t) + + isEnabled, err := instances.IsRootEnabled(fake.ServiceClient(), instanceID).Extract() + + th.AssertNoErr(t, err) + th.AssertEquals(t, true, isEnabled) +} + +func TestRestart(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRestart(t) + + res := instances.Restart(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} + +func TestResize(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleResize(t) + + res := instances.Resize(fake.ServiceClient(), instanceID, "2") + th.AssertNoErr(t, res.Err) +} + +func TestResizeVolume(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleResizeVol(t) + + res := instances.ResizeVolume(fake.ServiceClient(), instanceID, 4) + th.AssertNoErr(t, res.Err) +} + +func TestAttachConfigurationGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAttachConfigurationGroup(t) + + res := instances.AttachConfigurationGroup(fake.ServiceClient(), instanceID, configGroupID) + th.AssertNoErr(t, res.Err) +} + +func TestDetachConfigurationGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDetachConfigurationGroup(t) + + res := instances.DetachConfigurationGroup(fake.ServiceClient(), instanceID) + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/urls.go new file mode 100644 index 000000000000..76d1ca56d80e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/instances/urls.go @@ -0,0 +1,19 @@ +package instances + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("instances") +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id) +} + +func userRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id, "root") +} + +func actionURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("instances", id, "action") +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/doc.go new file mode 100644 index 000000000000..cf07832f3465 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/doc.go @@ -0,0 +1,3 @@ +// Package users provides information and interaction with the user API +// resource in the OpenStack Database service. +package users diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/requests.go new file mode 100644 index 000000000000..d342de34470f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/requests.go @@ -0,0 +1,91 @@ +package users + +import ( + "github.com/gophercloud/gophercloud" + db "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder is the top-level interface for creating JSON maps. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the struct responsible for configuring a new user; often in the +// context of an instance. +type CreateOpts struct { + // Specifies a name for the user. Valid names can be composed + // of the following characters: letters (either case); numbers; these + // characters '@', '?', '#', ' ' but NEVER beginning a name string; '_' is + // permitted anywhere. Prohibited characters that are forbidden include: + // single quotes, double quotes, back quotes, semicolons, commas, backslashes, + // and forward slashes. Spaces at the front or end of a user name are also + // not permitted. + Name string `json:"name" required:"true"` + // Specifies a password for the user. + Password string `json:"password" required:"true"` + // An array of databases that this user will connect to. The + // "name" field is the only requirement for each option. + Databases db.BatchCreateOpts `json:"databases,omitempty"` + // Specifies the host from which a user is allowed to connect to + // the database. Possible values are a string containing an IPv4 address or + // "%" to allow connecting from any host. Optional; the default is "%". + Host string `json:"host,omitempty"` +} + +// ToMap is a convenience function for creating sub-maps for individual users. +func (opts CreateOpts) ToMap() (map[string]interface{}, error) { + if opts.Name == "root" { + err := gophercloud.ErrInvalidInput{} + err.Argument = "users.CreateOpts.Name" + err.Value = "root" + err.Info = "root is a reserved user name and cannot be used" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "") +} + +// BatchCreateOpts allows multiple users to be created at once. +type BatchCreateOpts []CreateOpts + +// ToUserCreateMap will generate a JSON map. +func (opts BatchCreateOpts) ToUserCreateMap() (map[string]interface{}, error) { + users := make([]map[string]interface{}, len(opts)) + for i, opt := range opts { + user, err := opt.ToMap() + if err != nil { + return nil, err + } + users[i] = user + } + return map[string]interface{}{"users": users}, nil +} + +// Create asynchronously provisions a new user for the specified database +// instance based on the configuration defined in CreateOpts. If databases are +// assigned for a particular user, the user will be granted all privileges +// for those specified databases. "root" is a reserved name and cannot be used. 
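+//
+// A minimal sketch (names and credentials are illustrative; "databases" is
+// the openstack/db/v1/databases package):
+//
+//	opts := users.BatchCreateOpts{
+//		{
+//			Name:      "exampleuser",
+//			Password:  "examplepassword",
+//			Databases: databases.BatchCreateOpts{{Name: "exampledb"}},
+//		},
+//	}
+//	err := users.Create(client, instanceID, opts).ExtractErr()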
+func Create(client *gophercloud.ServiceClient, instanceID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToUserCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client, instanceID), &b, nil, nil) + return +} + +// List will list all the users associated with a specified database instance, +// along with their associated databases. This operation will not return any +// system users or administrators for a database. +func List(client *gophercloud.ServiceClient, instanceID string) pagination.Pager { + return pagination.NewPager(client, baseURL(client, instanceID), func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete will permanently delete a user from a specified database instance. +func Delete(client *gophercloud.ServiceClient, instanceID, userName string) (r DeleteResult) { + _, r.Err = client.Delete(userURL(client, instanceID, userName), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/results.go new file mode 100644 index 000000000000..d12a681bdf0d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/results.go @@ -0,0 +1,62 @@ +package users + +import ( + "github.com/gophercloud/gophercloud" + db "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/pagination" +) + +// User represents a database user +type User struct { + // The user name + Name string + + // The user password + Password string + + // The databases associated with this user + Databases []db.Database +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UserPage represents a single page of a paginated user collection. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks to see whether the collection is empty. +func (page UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(page) + return len(users) == 0, err +} + +// NextPageURL will retrieve the next page URL. +func (page UserPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"users_links"` + } + err := page.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractUsers will convert a generic pagination struct into a more +// relevant slice of User structs. 
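// A minimal sketch of listing users (same assumed "dbClient" and "instanceID" as in the
// Create sketch above; List pages the results and ExtractUsers decodes each page):
//
//	err := users.List(dbClient, instanceID).EachPage(func(page pagination.Page) (bool, error) {
//		us, err := users.ExtractUsers(page)
//		if err != nil {
//			return false, err
//		}
//		for _, u := range us {
//			fmt.Println(u.Name)
//		}
//		return true, nil
//	})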
+func ExtractUsers(r pagination.Page) ([]User, error) { + var s struct { + Users []User `json:"users"` + } + err := (r.(UserPage)).ExtractInto(&s) + return s.Users, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/doc.go new file mode 100644 index 000000000000..3c98966e3a39 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/doc.go @@ -0,0 +1,2 @@ +// db_users_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/fixtures.go new file mode 100644 index 000000000000..f49f46f93cf5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/fixtures.go @@ -0,0 +1,37 @@ +package testing + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud/testhelper/fixture" +) + +const user1 = ` +{"databases": [{"name": "databaseA"}],"name": "dbuser3"%s} +` + +const user2 = ` +{"databases": [{"name": "databaseB"},{"name": "databaseC"}],"name": "dbuser4"%s} +` + +var ( + instanceID = "{instanceID}" + _rootURL = "/instances/" + instanceID + "/users" + pUser1 = fmt.Sprintf(user1, `,"password":"secretsecret"`) + pUser2 = fmt.Sprintf(user2, `,"password":"secretsecret"`) + createReq = fmt.Sprintf(`{"users":[%s, %s]}`, pUser1, pUser2) + listResp = fmt.Sprintf(`{"users":[%s, %s]}`, fmt.Sprintf(user1, ""), fmt.Sprintf(user2, "")) +) + +func HandleCreate(t *testing.T) { + fixture.SetupHandler(t, _rootURL, "POST", createReq, "", 202) +} + +func HandleList(t *testing.T) { + fixture.SetupHandler(t, _rootURL, "GET", "", listResp, 200) +} + +func HandleDelete(t *testing.T) { + fixture.SetupHandler(t, _rootURL+"/{userName}", "DELETE", "", "", 202) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/requests_test.go new file mode 100644 index 000000000000..952f245eb7ee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/testing/requests_test.go @@ -0,0 +1,85 @@ +package testing + +import ( + "testing" + + db "github.com/gophercloud/gophercloud/openstack/db/v1/databases" + "github.com/gophercloud/gophercloud/openstack/db/v1/users" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreate(t) + + opts := users.BatchCreateOpts{ + { + Databases: db.BatchCreateOpts{ + db.CreateOpts{Name: "databaseA"}, + }, + Name: "dbuser3", + Password: "secretsecret", + }, + { + Databases: db.BatchCreateOpts{ + {Name: "databaseB"}, + {Name: "databaseC"}, + }, + Name: "dbuser4", + Password: "secretsecret", + }, + } + + res := users.Create(fake.ServiceClient(), instanceID, opts) + th.AssertNoErr(t, res.Err) +} + +func TestUserList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleList(t) + + expectedUsers := []users.User{ + { + Databases: []db.Database{ + db.Database{Name: "databaseA"}, + }, + Name: "dbuser3", + }, + { + Databases: []db.Database{ + {Name: "databaseB"}, + {Name: "databaseC"}, + }, + Name: "dbuser4", + }, + } + + pages := 0 + err := users.List(fake.ServiceClient(), instanceID).EachPage(func(page pagination.Page) (bool, 
error) { + pages++ + + actual, err := users.ExtractUsers(page) + if err != nil { + return false, err + } + + th.CheckDeepEquals(t, expectedUsers, actual) + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDelete(t) + + res := users.Delete(fake.ServiceClient(), instanceID, "{userName}") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/urls.go new file mode 100644 index 000000000000..8c36a39b3036 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/db/v1/users/urls.go @@ -0,0 +1,11 @@ +package users + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, instanceID string) string { + return c.ServiceURL("instances", instanceID, "users") +} + +func userURL(c *gophercloud.ServiceClient, instanceID, userName string) string { + return c.ServiceURL("instances", instanceID, "users", userName) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go new file mode 100644 index 000000000000..617fafa63148 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/doc.go @@ -0,0 +1,54 @@ +/* +Package recordsets provides information and interaction with the zone API +resource for the OpenStack DNS service. + +Example to List RecordSets by Zone + + listOpts := recordsets.ListOpts{ + Type: "A", + } + + zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd" + + allPages, err := recordsets.ListByZone(dnsClient, zoneID, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRRs, err := recordsets.ExtractRecordSets(allPages) + if err != nil { + panic(err) + } + + for _, rr := range allRRs { + fmt.Printf("%+v\n", rr) + } + +Example to Create a RecordSet + + createOpts := recordsets.CreateOpts{ + Name: "example.com.", + Type: "A", + TTL: 3600, + Description: "This is a recordset.", + Records: []string{"10.1.0.2"}, + } + + zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd" + + rr, err := recordsets.Create(dnsClient, zoneID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a RecordSet + + zoneID := "fff121f5-c506-410a-a69e-2d73ef9cbdbd" + recordsetID := "d96ed01a-b439-4eb8-9b90-7a9f71017f7b" + + err := recordsets.Delete(dnsClient, zoneID, recordsetID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package recordsets diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go new file mode 100644 index 000000000000..2d6ecdc3dcfc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/requests.go @@ -0,0 +1,166 @@ +package recordsets + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToRecordSetListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination.
+// https://developer.openstack.org/api-ref/dns/ +type ListOpts struct { + // Integer value for the limit of values to return. + Limit int `q:"limit"` + + // UUID of the recordset at which you want to set a marker. + Marker string `q:"marker"` + + Data string `q:"data"` + Description string `q:"description"` + Name string `q:"name"` + SortDir string `q:"sort_dir"` + SortKey string `q:"sort_key"` + Status string `q:"status"` + TTL int `q:"ttl"` + Type string `q:"type"` + ZoneID string `q:"zone_id"` +} + +// ToRecordSetListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRecordSetListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListByZone implements the recordset list request. +func ListByZone(client *gophercloud.ServiceClient, zoneID string, opts ListOptsBuilder) pagination.Pager { + url := baseURL(client, zoneID) + if opts != nil { + query, err := opts.ToRecordSetListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return RecordSetPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get implements the recordset Get request. +func Get(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r GetResult) { + _, r.Err = client.Get(rrsetURL(client, zoneID, rrsetID), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional attributes to the +// Create request. +type CreateOptsBuilder interface { + ToRecordSetCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies the base attributes that may be used to create a +// RecordSet. +type CreateOpts struct { + // Name is the name of the RecordSet. + Name string `json:"name" required:"true"` + + // Description is a description of the RecordSet. + Description string `json:"description,omitempty"` + + // Records are the DNS records of the RecordSet. + Records []string `json:"records,omitempty"` + + // TTL is the time to live of the RecordSet. + TTL int `json:"ttl,omitempty"` + + // Type is the RRTYPE of the RecordSet. + Type string `json:"type,omitempty"` +} + +// ToRecordSetCreateMap formats an CreateOpts structure into a request body. +func (opts CreateOpts) ToRecordSetCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + return b, nil +} + +// Create creates a recordset in a given zone. +func Create(client *gophercloud.ServiceClient, zoneID string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRecordSetCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. +type UpdateOptsBuilder interface { + ToRecordSetUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an existing +// RecordSet. +type UpdateOpts struct { + // Description is a description of the RecordSet. + Description string `json:"description,omitempty"` + + // TTL is the time to live of the RecordSet. + TTL int `json:"ttl,omitempty"` + + // Records are the DNS records of the RecordSet. + Records []string `json:"records,omitempty"` +} + +// ToRecordSetUpdateMap formats an UpdateOpts structure into a request body. 
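// An illustrative note on the method below (a sketch, not normative): "ttl" is always
// present in the generated body, either as the positive TTL when one is given or as an
// explicit null, so updating with a zero TTL clears the record set's TTL rather than
// leaving it unchanged:
//
//	b, _ := recordsets.UpdateOpts{Description: "updated"}.ToRecordSetUpdateMap()
//	// b["description"] == "updated", b["ttl"] == nil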
+func (opts UpdateOpts) ToRecordSetUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.TTL > 0 { + b["ttl"] = opts.TTL + } else { + b["ttl"] = nil + } + + return b, nil +} + +// Update updates a recordset in a given zone +func Update(client *gophercloud.ServiceClient, zoneID string, rrsetID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRecordSetUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(rrsetURL(client, zoneID, rrsetID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete removes an existing RecordSet. +func Delete(client *gophercloud.ServiceClient, zoneID string, rrsetID string) (r DeleteResult) { + _, r.Err = client.Delete(rrsetURL(client, zoneID, rrsetID), &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go new file mode 100644 index 000000000000..0fdc1fe52ea5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/results.go @@ -0,0 +1,147 @@ +package recordsets + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a RecordSet. +// An error is returned if the original call or the extraction failed. +func (r commonResult) Extract() (*RecordSet, error) { + var s *RecordSet + err := r.ExtractInto(&s) + return s, err +} + +// CreateResult is the result of a Create operation. Call its Extract method to +// interpret the result as a RecordSet. +type CreateResult struct { + commonResult +} + +// GetResult is the result of a Get operation. Call its Extract method to +// interpret the result as a RecordSet. +type GetResult struct { + commonResult +} + +// RecordSetPage is a single page of RecordSet results. +type RecordSetPage struct { + pagination.LinkedPageBase +} + +// UpdateResult is result of an Update operation. Call its Extract method to +// interpret the result as a RecordSet. +type UpdateResult struct { + commonResult +} + +// DeleteResult is result of a Delete operation. Call its ExtractErr method to +// determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// IsEmpty returns true if the page contains no results. +func (r RecordSetPage) IsEmpty() (bool, error) { + s, err := ExtractRecordSets(r) + return len(s) == 0, err +} + +// ExtractRecordSets extracts a slice of RecordSets from a List result. +func ExtractRecordSets(r pagination.Page) ([]RecordSet, error) { + var s struct { + RecordSets []RecordSet `json:"recordsets"` + } + err := (r.(RecordSetPage)).ExtractInto(&s) + return s.RecordSets, err +} + +// RecordSet represents a DNS Record Set. +type RecordSet struct { + // ID is the unique ID of the recordset + ID string `json:"id"` + + // ZoneID is the ID of the zone the recordset belongs to. + ZoneID string `json:"zone_id"` + + // ProjectID is the ID of the project that owns the recordset. + ProjectID string `json:"project_id"` + + // Name is the name of the recordset. + Name string `json:"name"` + + // ZoneName is the name of the zone the recordset belongs to. 
+ ZoneName string `json:"zone_name"` + + // Type is the RRTYPE of the recordset. + Type string `json:"type"` + + // Records are the DNS records of the recordset. + Records []string `json:"records"` + + // TTL is the time to live of the recordset. + TTL int `json:"ttl"` + + // Status is the status of the recordset. + Status string `json:"status"` + + // Action is the current action in progress of the recordset. + Action string `json:"action"` + + // Description is the description of the recordset. + Description string `json:"description"` + + // Version is the revision of the recordset. + Version int `json:"version"` + + // CreatedAt is the date when the recordset was created. + CreatedAt time.Time `json:"-"` + + // UpdatedAt is the date when the recordset was updated. + UpdatedAt time.Time `json:"-"` + + // Links includes HTTP references to the itself, + // useful for passing along to other APIs that might want a recordset + // reference. + Links []gophercloud.Link `json:"-"` +} + +func (r *RecordSet) UnmarshalJSON(b []byte) error { + type tmp RecordSet + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + Links map[string]interface{} `json:"links"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = RecordSet(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + if s.Links != nil { + for rel, href := range s.Links { + if v, ok := href.(string); ok { + link := gophercloud.Link{ + Rel: rel, + Href: v, + } + r.Links = append(r.Links, link) + } + } + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/doc.go new file mode 100644 index 000000000000..f4d91dc23cc8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/doc.go @@ -0,0 +1,2 @@ +// recordsets unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/fixtures.go new file mode 100644 index 000000000000..24ca89c51800 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/fixtures.go @@ -0,0 +1,377 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListByZoneOutput is a sample response to a ListByZone call. 
+const ListByZoneOutput = ` +{ + "recordsets": [ + { + "description": "This is an example record set.", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648" + }, + "updated_at": null, + "records": [ + "10.1.0.2" + ], + "ttl": 3600, + "id": "f7b10e9b-0cae-4a91-b162-562bc6096648", + "name": "example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 1, + "type": "A", + "status": "PENDING", + "action": "CREATE" + }, + { + "description": "This is another example record set.", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/7423aeaf-b354-4bd7-8aba-2e831567b478" + }, + "updated_at": "2017-03-04T14:29:07.000000", + "records": [ + "10.1.0.3", + "10.1.0.4" + ], + "ttl": 3600, + "id": "7423aeaf-b354-4bd7-8aba-2e831567b478", + "name": "foo.example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 1, + "type": "A", + "status": "PENDING", + "action": "CREATE" + } + ], + "links": { + "self": "http://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets" + }, + "metadata": { + "total_count": 2 + } +} +` + +// ListByZoneOutputLimited is a sample response to a ListByZone call with a requested limit. +const ListByZoneOutputLimited = ` +{ + "recordsets": [ + { + "description": "This is another example record set.", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/7423aeaf-b354-4bd7-8aba-2e831567b478" + }, + "updated_at": "2017-03-04T14:29:07.000000", + "records": [ + "10.1.0.3", + "10.1.0.4" + ], + "ttl": 3600, + "id": "7423aeaf-b354-4bd7-8aba-2e831567b478", + "name": "foo.example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 1, + "type": "A", + "status": "PENDING", + "action": "CREATE" + } + ], + "links": { + "self": "http://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets?limit=1" + }, + "metadata": { + "total_count": 1 + } +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "description": "This is an example record set.", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648" + }, + "updated_at": null, + "records": [ + "10.1.0.2" + ], + "ttl": 3600, + "id": "f7b10e9b-0cae-4a91-b162-562bc6096648", + "name": "example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 1, + "type": "A", + "status": "PENDING", + "action": "CREATE" +} +` + +// NextPageRequest is a sample request to test pagination. 
+const NextPageRequest = ` +{ + "links": { + "self": "http://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets?limit=1", + "next": "http://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets?limit=1&marker=f7b10e9b-0cae-4a91-b162-562bc6096648" + } +} +` + +// FirstRecordSet is the first result in ListByZoneOutput +var FirstRecordSetCreatedAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2014-10-24T19:59:44.000000") +var FirstRecordSet = recordsets.RecordSet{ + ID: "f7b10e9b-0cae-4a91-b162-562bc6096648", + Description: "This is an example record set.", + UpdatedAt: time.Time{}, + Records: []string{"10.1.0.2"}, + TTL: 3600, + Name: "example.org.", + ProjectID: "4335d1f0-f793-11e2-b778-0800200c9a66", + ZoneID: "2150b1bf-dee2-4221-9d85-11f7886fb15f", + ZoneName: "example.com.", + CreatedAt: FirstRecordSetCreatedAt, + Version: 1, + Type: "A", + Status: "PENDING", + Action: "CREATE", + Links: []gophercloud.Link{ + { + Rel: "self", + Href: "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648", + }, + }, +} + +// SecondRecordSet is the second result in ListByZoneOutput +var SecondRecordSetCreatedAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2014-10-24T19:59:44.000000") +var SecondRecordSetUpdatedAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2017-03-04T14:29:07.000000") +var SecondRecordSet = recordsets.RecordSet{ + ID: "7423aeaf-b354-4bd7-8aba-2e831567b478", + Description: "This is another example record set.", + UpdatedAt: SecondRecordSetUpdatedAt, + Records: []string{"10.1.0.3", "10.1.0.4"}, + TTL: 3600, + Name: "foo.example.org.", + ProjectID: "4335d1f0-f793-11e2-b778-0800200c9a66", + ZoneID: "2150b1bf-dee2-4221-9d85-11f7886fb15f", + ZoneName: "example.com.", + CreatedAt: SecondRecordSetCreatedAt, + Version: 1, + Type: "A", + Status: "PENDING", + Action: "CREATE", + Links: []gophercloud.Link{ + { + Rel: "self", + Href: "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/7423aeaf-b354-4bd7-8aba-2e831567b478", + }, + }, +} + +// ExpectedRecordSetSlice is the slice of results that should be parsed +// from ListByZoneOutput, in the expected order. +var ExpectedRecordSetSlice = []recordsets.RecordSet{FirstRecordSet, SecondRecordSet} + +// ExpectedRecordSetSliceLimited is the slice of limited results that should be parsed +// from ListByZoneOutputLimited. +var ExpectedRecordSetSliceLimited = []recordsets.RecordSet{SecondRecordSet} + +// HandleListByZoneSuccessfully configures the test server to respond to a ListByZone request. +func HandleListByZoneSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "f7b10e9b-0cae-4a91-b162-562bc6096648": + fmt.Fprintf(w, ListByZoneOutputLimited) + case "": + fmt.Fprintf(w, ListByZoneOutput) + } + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request.
+func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// CreateRecordSetRequest is a sample request to create a resource record. +const CreateRecordSetRequest = ` +{ + "name" : "example.org.", + "description" : "This is an example record set.", + "type" : "A", + "ttl" : 3600, + "records" : [ + "10.1.0.2" + ] +} +` + +// CreateRecordSetResponse is a sample response to a create request. +const CreateRecordSetResponse = ` +{ + "description": "This is an example record set.", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648" + }, + "updated_at": null, + "records": [ + "10.1.0.2" + ], + "ttl": 3600, + "id": "f7b10e9b-0cae-4a91-b162-562bc6096648", + "name": "example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 1, + "type": "A", + "status": "PENDING", + "action": "CREATE" +} +` + +// CreatedRecordSet is the expected created resource record. +var CreatedRecordSet = FirstRecordSet + +// HandleZoneCreationSuccessfully configures the test server to respond to a Create request. +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRecordSetRequest) + + w.WriteHeader(http.StatusCreated) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateRecordSetResponse) + }) +} + +// UpdateRecordSetRequest is a sample request to update a record set. +const UpdateRecordSetRequest = ` +{ + "description" : "Updated description", + "ttl" : null, + "records" : [ + "10.1.0.2", + "10.1.0.3" + ] +} +` + +// UpdateRecordSetResponse is a sample response to an update request. +const UpdateRecordSetResponse = ` +{ + "description": "Updated description", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648" + }, + "updated_at": null, + "records": [ + "10.1.0.2", + "10.1.0.3" + ], + "ttl": 3600, + "id": "f7b10e9b-0cae-4a91-b162-562bc6096648", + "name": "example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 2, + "type": "A", + "status": "PENDING", + "action": "UPDATE" +} +` + +// HandleUpdateSuccessfully configures the test server to respond to an Update request. 
+func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRecordSetRequest) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, UpdateRecordSetResponse) + }) +} + +// DeleteRecordSetResponse is a sample response to a delete request. +const DeleteRecordSetResponse = ` +{ + "description": "Updated description", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648" + }, + "updated_at": null, + "records": [ + "10.1.0.2", + "10.1.0.3" + ], + "ttl": null, + "id": "f7b10e9b-0cae-4a91-b162-562bc6096648", + "name": "example.org.", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "zone_id": "2150b1bf-dee2-4221-9d85-11f7886fb15f", + "zone_name": "example.com.", + "created_at": "2014-10-24T19:59:44.000000", + "version": 2, + "type": "A", + "status": "PENDING", + "action": "UPDATE" +} +` + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request. +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets/f7b10e9b-0cae-4a91-b162-562bc6096648", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + //w.Header().Add("Content-Type", "application/json") + //fmt.Fprintf(w, DeleteRecordSetResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/requests_test.go new file mode 100644 index 000000000000..21630152ffc9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/testing/requests_test.go @@ -0,0 +1,145 @@ +package testing + +import ( + "encoding/json" + "testing" + + "github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListByZone(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListByZoneSuccessfully(t) + + count := 0 + err := recordsets.ListByZone(client.ServiceClient(), "2150b1bf-dee2-4221-9d85-11f7886fb15f", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := recordsets.ExtractRecordSets(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRecordSetSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestListByZoneLimited(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListByZoneSuccessfully(t) + + count := 0 + listOpts := recordsets.ListOpts{ + Limit: 1, + Marker: "f7b10e9b-0cae-4a91-b162-562bc6096648", + } + err := recordsets.ListByZone(client.ServiceClient(), "2150b1bf-dee2-4221-9d85-11f7886fb15f", listOpts).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := recordsets.ExtractRecordSets(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRecordSetSliceLimited, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count)
+} + +func TestListByZoneAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListByZoneSuccessfully(t) + + allPages, err := recordsets.ListByZone(client.ServiceClient(), "2150b1bf-dee2-4221-9d85-11f7886fb15f", nil).AllPages() + th.AssertNoErr(t, err) + allRecordSets, err := recordsets.ExtractRecordSets(allPages) + th.AssertNoErr(t, err) + th.CheckEquals(t, 2, len(allRecordSets)) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := recordsets.Get(client.ServiceClient(), "2150b1bf-dee2-4221-9d85-11f7886fb15f", "f7b10e9b-0cae-4a91-b162-562bc6096648").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstRecordSet, actual) +} + +func TestNextPageURL(t *testing.T) { + var page recordsets.RecordSetPage + var body map[string]interface{} + err := json.Unmarshal([]byte(NextPageRequest), &body) + if err != nil { + t.Fatalf("Error unmarshaling data into page body: %v", err) + } + page.Body = body + expected := "http://127.0.0.1:9001/v2/zones/2150b1bf-dee2-4221-9d85-11f7886fb15f/recordsets?limit=1&marker=f7b10e9b-0cae-4a91-b162-562bc6096648" + actual, err := page.NextPageURL() + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + createOpts := recordsets.CreateOpts{ + Name: "example.org.", + Type: "A", + TTL: 3600, + Description: "This is an example record set.", + Records: []string{"10.1.0.2"}, + } + + actual, err := recordsets.Create(client.ServiceClient(), "2150b1bf-dee2-4221-9d85-11f7886fb15f", createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedRecordSet, actual) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + updateOpts := recordsets.UpdateOpts{ + TTL: 0, + Description: "Updated description", + Records: []string{"10.1.0.2", "10.1.0.3"}, + } + + UpdatedRecordSet := CreatedRecordSet + UpdatedRecordSet.Status = "PENDING" + UpdatedRecordSet.Action = "UPDATE" + UpdatedRecordSet.Description = "Updated description" + UpdatedRecordSet.Records = []string{"10.1.0.2", "10.1.0.3"} + UpdatedRecordSet.Version = 2 + + actual, err := recordsets.Update(client.ServiceClient(), UpdatedRecordSet.ZoneID, UpdatedRecordSet.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &UpdatedRecordSet, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + DeletedRecordSet := CreatedRecordSet + DeletedRecordSet.Status = "PENDING" + DeletedRecordSet.Action = "UPDATE" + DeletedRecordSet.Description = "Updated description" + DeletedRecordSet.Records = []string{"10.1.0.2", "10.1.0.3"} + DeletedRecordSet.Version = 2 + + err := recordsets.Delete(client.ServiceClient(), DeletedRecordSet.ZoneID, DeletedRecordSet.ID).ExtractErr() + th.AssertNoErr(t, err) + //th.CheckDeepEquals(t, &DeletedZone, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go new file mode 100644 index 000000000000..5ec18d1bb78e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/recordsets/urls.go @@ -0,0 +1,11 @@ +package recordsets + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient, zoneID string) string { + return c.ServiceURL("zones", zoneID, "recordsets") +} + 
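// A small orientation sketch (assuming "dnsClient" is an authenticated DNS v2
// *gophercloud.ServiceClient): baseURL above and rrsetURL below simply compose
// Designate v2 paths on top of the service endpoint, e.g.
//
//	u := rrsetURL(dnsClient, "2150b1bf-dee2-4221-9d85-11f7886fb15f",
//		"f7b10e9b-0cae-4a91-b162-562bc6096648")
//	// u == "<endpoint>/zones/2150b1bf-.../recordsets/f7b10e9b-..."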
+func rrsetURL(c *gophercloud.ServiceClient, zoneID string, rrsetID string) string { + return c.ServiceURL("zones", zoneID, "recordsets", rrsetID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go new file mode 100644 index 000000000000..7733155bcfaf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/doc.go @@ -0,0 +1,48 @@ +/* +Package zones provides information and interaction with the zone API +resource for the OpenStack DNS service. + +Example to List Zones + + listOpts := zones.ListOpts{ + Email: "jdoe@example.com", + } + + allPages, err := zones.List(dnsClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allZones, err := zones.ExtractZones(allPages) + if err != nil { + panic(err) + } + + for _, zone := range allZones { + fmt.Printf("%+v\n", zone) + } + +Example to Create a Zone + + createOpts := zones.CreateOpts{ + Name: "example.com.", + Email: "jdoe@example.com", + Type: "PRIMARY", + TTL: 7200, + Description: "This is a zone.", + } + + zone, err := zones.Create(dnsClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Zone + + zoneID := "99d10f68-5623-4491-91a0-6daafa32b60e" + err := zones.Delete(dnsClient, zoneID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package zones diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go new file mode 100644 index 000000000000..f87deadce742 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/requests.go @@ -0,0 +1,174 @@ +package zones + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add parameters to the List request. +type ListOptsBuilder interface { + ToZoneListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the server attributes you want to see returned. Marker and Limit are used +// for pagination. +// https://developer.openstack.org/api-ref/dns/ +type ListOpts struct { + // Integer value for the limit of values to return. + Limit int `q:"limit"` + + // UUID of the zone at which you want to set a marker. + Marker string `q:"marker"` + + Description string `q:"description"` + Email string `q:"email"` + Name string `q:"name"` + SortDir string `q:"sort_dir"` + SortKey string `q:"sort_key"` + Status string `q:"status"` + TTL int `q:"ttl"` + Type string `q:"type"` +} + +// ToZoneListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToZoneListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List implements a zone List request. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := baseURL(client) + if opts != nil { + query, err := opts.ToZoneListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ZonePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns information about a zone, given its ID. 
+func Get(client *gophercloud.ServiceClient, zoneID string) (r GetResult) { + _, r.Err = client.Get(zoneURL(client, zoneID), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional attributes to the +// Create request. +type CreateOptsBuilder interface { + ToZoneCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies the attributes used to create a zone. +type CreateOpts struct { + // Attributes are settings that supply hints and filters for the zone. + Attributes map[string]string `json:"attributes,omitempty"` + + // Email contact of the zone. + Email string `json:"email,omitempty"` + + // Description of the zone. + Description string `json:"description,omitempty"` + + // Name of the zone. + Name string `json:"name" required:"true"` + + // Masters specifies zone masters if this is a secondary zone. + Masters []string `json:"masters,omitempty"` + + // TTL is the time to live of the zone. + TTL int `json:"-"` + + // Type specifies if this is a primary or secondary zone. + Type string `json:"type,omitempty"` +} + +// ToZoneCreateMap formats an CreateOpts structure into a request body. +func (opts CreateOpts) ToZoneCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.TTL > 0 { + b["ttl"] = opts.TTL + } + + return b, nil +} + +// Create implements a zone create request. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToZoneCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(baseURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201, 202}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional attributes to the +// Update request. +type UpdateOptsBuilder interface { + ToZoneUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the attributes to update a zone. +type UpdateOpts struct { + // Email contact of the zone. + Email string `json:"email,omitempty"` + + // TTL is the time to live of the zone. + TTL int `json:"-"` + + // Masters specifies zone masters if this is a secondary zone. + Masters []string `json:"masters,omitempty"` + + // Description of the zone. + Description string `json:"description,omitempty"` +} + +// ToZoneUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToZoneUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if opts.TTL > 0 { + b["ttl"] = opts.TTL + } + + return b, nil +} + +// Update implements a zone update request. +func Update(client *gophercloud.ServiceClient, zoneID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToZoneUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(zoneURL(client, zoneID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete implements a zone delete request. 
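// A hedged sketch for Delete below (assuming an authenticated DNS v2 client "dnsClient"
// and a zone ID): unlike the recordsets Delete above, this request passes JSONResponse,
// so the 202 body is retained and the transitional zone can still be extracted:
//
//	deleted, err := zones.Delete(dnsClient, zoneID).Extract()
//	// per the DeleteZoneResponse fixture, deleted.Status == "PENDING" and
//	// deleted.Action == "DELETE" while the zone is removed asynchronously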
+func Delete(client *gophercloud.ServiceClient, zoneID string) (r DeleteResult) { + _, r.Err = client.Delete(zoneURL(client, zoneID), &gophercloud.RequestOpts{ + OkCodes: []int{202}, + JSONResponse: &r.Body, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go new file mode 100644 index 000000000000..a36eca7e2053 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/results.go @@ -0,0 +1,166 @@ +package zones + +import ( + "encoding/json" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a Zone. +// An error is returned if the original call or the extraction failed. +func (r commonResult) Extract() (*Zone, error) { + var s *Zone + err := r.ExtractInto(&s) + return s, err +} + +// CreateResult is the result of a Create request. Call its Extract method +// to interpret the result as a Zone. +type CreateResult struct { + commonResult +} + +// GetResult is the result of a Get request. Call its Extract method +// to interpret the result as a Zone. +type GetResult struct { + commonResult +} + +// UpdateResult is the result of an Update request. Call its Extract method +// to interpret the result as a Zone. +type UpdateResult struct { + commonResult +} + +// DeleteResult is the result of a Delete request. Call its ExtractErr method +// to determine if the request succeeded or failed. +type DeleteResult struct { + commonResult +} + +// ZonePage is a single page of Zone results. +type ZonePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if the page contains no results. +func (r ZonePage) IsEmpty() (bool, error) { + s, err := ExtractZones(r) + return len(s) == 0, err +} + +// ExtractZones extracts a slice of Zones from a List result. +func ExtractZones(r pagination.Page) ([]Zone, error) { + var s struct { + Zones []Zone `json:"zones"` + } + err := (r.(ZonePage)).ExtractInto(&s) + return s.Zones, err +} + +// Zone represents a DNS zone. +type Zone struct { + // ID uniquely identifies this zone amongst all other zones, including those + // not accessible to the current tenant. + ID string `json:"id"` + + // PoolID is the ID for the pool hosting this zone. + PoolID string `json:"pool_id"` + + // ProjectID identifies the project/tenant owning this resource. + ProjectID string `json:"project_id"` + + // Name is the DNS Name for the zone. + Name string `json:"name"` + + // Email for the zone. Used in SOA records for the zone. + Email string `json:"email"` + + // Description for this zone. + Description string `json:"description"` + + // TTL is the Time to Live for the zone. + TTL int `json:"ttl"` + + // Serial is the current serial number for the zone. + Serial int `json:"-"` + + // Status is the status of the resource. + Status string `json:"status"` + + // Action is the current action in progress on the resource. + Action string `json:"action"` + + // Version of the resource. + Version int `json:"version"` + + // Attributes for the zone. + Attributes map[string]string `json:"attributes"` + + // Type of zone. Primary is controlled by Designate. + // Secondary zones are slaved from another DNS Server. + // Defaults to Primary. + Type string `json:"type"` + + // Masters is the servers for slave servers to get DNS information from. 
+ Masters []string `json:"masters"` + + // CreatedAt is the date when the zone was created. + CreatedAt time.Time `json:"-"` + + // UpdatedAt is the date when the last change was made to the zone. + UpdatedAt time.Time `json:"-"` + + // TransferredAt is the last time an update was retrieved from the + // master servers. + TransferredAt time.Time `json:"-"` + + // Links includes HTTP references to the itself, useful for passing along + // to other APIs that might want a server reference. + Links map[string]interface{} `json:"links"` +} + +func (r *Zone) UnmarshalJSON(b []byte) error { + type tmp Zone + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + TransferredAt gophercloud.JSONRFC3339MilliNoZ `json:"transferred_at"` + Serial interface{} `json:"serial"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Zone(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + r.TransferredAt = time.Time(s.TransferredAt) + + switch t := s.Serial.(type) { + case float64: + r.Serial = int(t) + case string: + switch t { + case "": + r.Serial = 0 + default: + serial, err := strconv.ParseFloat(t, 64) + if err != nil { + return err + } + r.Serial = int(serial) + } + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/doc.go new file mode 100644 index 000000000000..b9b6286d7523 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/doc.go @@ -0,0 +1,2 @@ +// zones unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/fixtures.go new file mode 100644 index 000000000000..55e401306d36 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/fixtures.go @@ -0,0 +1,302 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// List Output is a sample response to a List call. 
+const ListOutput = ` +{ + "links": { + "self": "http://example.com:9001/v2/zones" + }, + "metadata": { + "total_count": 2 + }, + "zones": [ + { + "id": "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + "pool_id": "572ba08c-d929-4c70-8e42-03824bb24ca2", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "name": "example.org.", + "email": "joe@example.org", + "ttl": 7200, + "serial": 1404757531, + "status": "ACTIVE", + "action": "CREATE", + "description": "This is an example zone.", + "masters": [], + "type": "PRIMARY", + "transferred_at": null, + "version": 1, + "created_at": "2014-07-07T18:25:31.275934", + "updated_at": null, + "links": { + "self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3" + } + }, + { + "id": "34c4561c-9205-4386-9df5-167436f5a222", + "pool_id": "572ba08c-d929-4c70-8e42-03824bb24ca2", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "name": "foo.example.com.", + "email": "joe@foo.example.com", + "ttl": 7200, + "serial": 1488053571, + "status": "ACTIVE", + "action": "CREATE", + "description": "This is another example zone.", + "masters": ["example.com."], + "type": "PRIMARY", + "transferred_at": null, + "version": 1, + "created_at": "2014-07-07T18:25:31.275934", + "updated_at": "2015-02-25T20:23:01.234567", + "links": { + "self": "https://127.0.0.1:9001/v2/zones/34c4561c-9205-4386-9df5-167436f5a222" + } + } + ] +} +` + +// GetOutput is a sample response to a Get call. +const GetOutput = ` +{ + "id": "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + "pool_id": "572ba08c-d929-4c70-8e42-03824bb24ca2", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "name": "example.org.", + "email": "joe@example.org", + "ttl": 7200, + "serial": 1404757531, + "status": "ACTIVE", + "action": "CREATE", + "description": "This is an example zone.", + "masters": [], + "type": "PRIMARY", + "transferred_at": null, + "version": 1, + "created_at": "2014-07-07T18:25:31.275934", + "updated_at": null, + "links": { + "self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3" + } +} +` + +// FirstZone is the first result in ListOutput +var FirstZoneCreatedAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2014-07-07T18:25:31.275934") +var FirstZone = zones.Zone{ + ID: "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + PoolID: "572ba08c-d929-4c70-8e42-03824bb24ca2", + ProjectID: "4335d1f0-f793-11e2-b778-0800200c9a66", + Name: "example.org.", + Email: "joe@example.org", + TTL: 7200, + Serial: 1404757531, + Status: "ACTIVE", + Action: "CREATE", + Description: "This is an example zone.", + Masters: []string{}, + Type: "PRIMARY", + Version: 1, + CreatedAt: FirstZoneCreatedAt, + Links: map[string]interface{}{ + "self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + }, +} + +var SecondZoneCreatedAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2014-07-07T18:25:31.275934") +var SecondZoneUpdatedAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2015-02-25T20:23:01.234567") +var SecondZone = zones.Zone{ + ID: "34c4561c-9205-4386-9df5-167436f5a222", + PoolID: "572ba08c-d929-4c70-8e42-03824bb24ca2", + ProjectID: "4335d1f0-f793-11e2-b778-0800200c9a66", + Name: "foo.example.com.", + Email: "joe@foo.example.com", + TTL: 7200, + Serial: 1488053571, + Status: "ACTIVE", + Action: "CREATE", + Description: "This is another example zone.", + Masters: []string{"example.com."}, + Type: "PRIMARY", + Version: 1, + CreatedAt: SecondZoneCreatedAt, + UpdatedAt: SecondZoneUpdatedAt, + Links: map[string]interface{}{ + "self": 
"https://127.0.0.1:9001/v2/zones/34c4561c-9205-4386-9df5-167436f5a222", + }, +} + +// ExpectedZonesSlice is the slice of results that should be parsed +// from ListOutput, in the expected order. +var ExpectedZonesSlice = []zones.Zone{FirstZone, SecondZone} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a List request. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetOutput) + }) +} + +// CreateZoneRequest is a sample request to create a zone. +const CreateZoneRequest = ` +{ + "name": "example.org.", + "email": "joe@example.org", + "type": "PRIMARY", + "ttl": 7200, + "description": "This is an example zone." +} +` + +// CreateZoneResponse is a sample response to a create request. +const CreateZoneResponse = ` +{ + "id": "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + "pool_id": "572ba08c-d929-4c70-8e42-03824bb24ca2", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "name": "example.org.", + "email": "joe@example.org", + "ttl": 7200, + "serial": 1404757531, + "status": "ACTIVE", + "action": "CREATE", + "description": "This is an example zone.", + "masters": [], + "type": "PRIMARY", + "transferred_at": null, + "version": 1, + "created_at": "2014-07-07T18:25:31.275934", + "updated_at": null, + "links": { + "self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3" + } +} +` + +// CreatedZone is the expected created zone +var CreatedZone = FirstZone + +// HandleZoneCreationSuccessfully configures the test server to respond to a Create request. +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateZoneRequest) + + w.WriteHeader(http.StatusCreated) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateZoneResponse) + }) +} + +// UpdateZoneRequest is a sample request to update a zone. +const UpdateZoneRequest = ` +{ + "ttl": 600, + "description": "Updated Description" +} +` + +// UpdateZoneResponse is a sample response to update a zone. +const UpdateZoneResponse = ` +{ + "id": "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + "pool_id": "572ba08c-d929-4c70-8e42-03824bb24ca2", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "name": "example.org.", + "email": "joe@example.org", + "ttl": 600, + "serial": 1404757531, + "status": "PENDING", + "action": "UPDATE", + "description": "Updated Description", + "masters": [], + "type": "PRIMARY", + "transferred_at": null, + "version": 1, + "created_at": "2014-07-07T18:25:31.275934", + "updated_at": null, + "links": { + "self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3" + } +} +` + +// HandleZoneUpdateSuccessfully configures the test server to respond to an Update request. 
+func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateZoneRequest) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, UpdateZoneResponse) + }) +} + +// DeleteZoneResponse is a sample response to a Delete request. +const DeleteZoneResponse = ` +{ + "id": "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + "pool_id": "572ba08c-d929-4c70-8e42-03824bb24ca2", + "project_id": "4335d1f0-f793-11e2-b778-0800200c9a66", + "name": "example.org.", + "email": "joe@example.org", + "ttl": 600, + "serial": 1404757531, + "status": "PENDING", + "action": "DELETE", + "description": "Updated Description", + "masters": [], + "type": "PRIMARY", + "transferred_at": null, + "version": 1, + "created_at": "2014-07-07T18:25:31.275934", + "updated_at": null, + "links": { + "self": "https://127.0.0.1:9001/v2/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3" + } +} +` + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request. +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/zones/a86dba58-0043-4cc6-a1bb-69d5e86f3ca3", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, DeleteZoneResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/requests_test.go new file mode 100644 index 000000000000..412b34933f50 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/testing/requests_test.go @@ -0,0 +1,105 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/dns/v2/zones" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + count := 0 + err := zones.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := zones.ExtractZones(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedZonesSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestListAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + allPages, err := zones.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + allZones, err := zones.ExtractZones(allPages) + th.AssertNoErr(t, err) + th.CheckEquals(t, 2, len(allZones)) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := zones.Get(client.ServiceClient(), "a86dba58-0043-4cc6-a1bb-69d5e86f3ca3").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstZone, actual) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + createOpts := zones.CreateOpts{ + Name: "example.org.", + Email: "joe@example.org", + Type: "PRIMARY", + TTL: 7200, + Description: "This is an example zone.", + } + 
actual, err := zones.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &CreatedZone, actual) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + updateOpts := zones.UpdateOpts{ + TTL: 600, + Description: "Updated Description", + } + + UpdatedZone := CreatedZone + UpdatedZone.Status = "PENDING" + UpdatedZone.Action = "UPDATE" + UpdatedZone.TTL = 600 + UpdatedZone.Description = "Updated Description" + + actual, err := zones.Update(client.ServiceClient(), UpdatedZone.ID, updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &UpdatedZone, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + DeletedZone := CreatedZone + DeletedZone.Status = "PENDING" + DeletedZone.Action = "DELETE" + DeletedZone.TTL = 600 + DeletedZone.Description = "Updated Description" + + actual, err := zones.Delete(client.ServiceClient(), DeletedZone.ID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &DeletedZone, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go new file mode 100644 index 000000000000..9bef7058096e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/dns/v2/zones/urls.go @@ -0,0 +1,11 @@ +package zones + +import "github.com/gophercloud/gophercloud" + +func baseURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("zones") +} + +func zoneURL(c *gophercloud.ServiceClient, zoneID string) string { + return c.ServiceURL("zones", zoneID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go new file mode 100644 index 000000000000..cedf1f4d3a38 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/doc.go @@ -0,0 +1,14 @@ +/* +Package openstack contains resources for the individual OpenStack projects +supported in Gophercloud. It also includes functions to authenticate to an +OpenStack cloud and for provisioning various service-level clients. + +Example of Creating a Service Client + + ao, err := openstack.AuthOptionsFromEnv() + provider, err := openstack.AuthenticatedClient(ao) + client, err := openstack.NewNetworkV2(provider, gophercloud.EndpointOpts{ + Region: os.Getenv("OS_REGION_NAME"), + }) +*/ +package openstack diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go new file mode 100644 index 000000000000..12c8aebcf77c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/endpoint_location.go @@ -0,0 +1,107 @@ +package openstack + +import ( + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +/* +V2EndpointURL discovers the endpoint URL for a specific service from a +ServiceCatalog acquired during the v2 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. 
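+
+As a rough, illustrative sketch only (the catalog variable and the option
+values below are placeholders, not part of this change), a lookup for a
+public network endpoint could look like:
+
+	url, err := V2EndpointURL(catalog, gophercloud.EndpointOpts{
+		Type:         "network",
+		Region:       "RegionOne",
+		Availability: gophercloud.AvailabilityPublic,
+	})
+	if err != nil {
+		// Either no endpoint or more than one endpoint matched the criteria.
+	}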
+*/ +func V2EndpointURL(catalog *tokens2.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Name if provided, and Region if provided. + var endpoints = make([]tokens2.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Region == "" || endpoint.Region == opts.Region { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + err := &ErrMultipleMatchingEndpointsV2{} + err.Endpoints = endpoints + return "", err + } + + // Extract the appropriate URL from the matching Endpoint. + for _, endpoint := range endpoints { + switch opts.Availability { + case gophercloud.AvailabilityPublic: + return gophercloud.NormalizeURL(endpoint.PublicURL), nil + case gophercloud.AvailabilityInternal: + return gophercloud.NormalizeURL(endpoint.InternalURL), nil + case gophercloud.AvailabilityAdmin: + return gophercloud.NormalizeURL(endpoint.AdminURL), nil + default: + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + } + + // Report an error if there were no matching endpoints. + err := &gophercloud.ErrEndpointNotFound{} + return "", err +} + +/* +V3EndpointURL discovers the endpoint URL for a specific service from a Catalog +acquired during the v3 identity service. + +The specified EndpointOpts are used to identify a unique, unambiguous endpoint +to return. It's an error both when multiple endpoints match the provided +criteria and when none do. The minimum that can be specified is a Type, but you +will also often need to specify a Name and/or a Region depending on what's +available on your OpenStack deployment. +*/ +func V3EndpointURL(catalog *tokens3.ServiceCatalog, opts gophercloud.EndpointOpts) (string, error) { + // Extract Endpoints from the catalog entries that match the requested Type, Interface, + // Name if provided, and Region if provided. + var endpoints = make([]tokens3.Endpoint, 0, 1) + for _, entry := range catalog.Entries { + if (entry.Type == opts.Type) && (opts.Name == "" || entry.Name == opts.Name) { + for _, endpoint := range entry.Endpoints { + if opts.Availability != gophercloud.AvailabilityAdmin && + opts.Availability != gophercloud.AvailabilityPublic && + opts.Availability != gophercloud.AvailabilityInternal { + err := &ErrInvalidAvailabilityProvided{} + err.Argument = "Availability" + err.Value = opts.Availability + return "", err + } + if (opts.Availability == gophercloud.Availability(endpoint.Interface)) && + (opts.Region == "" || endpoint.Region == opts.Region || endpoint.RegionID == opts.Region) { + endpoints = append(endpoints, endpoint) + } + } + } + } + + // Report an error if the options were ambiguous. + if len(endpoints) > 1 { + return "", ErrMultipleMatchingEndpointsV3{Endpoints: endpoints} + } + + // Extract the URL from the matching Endpoint. + for _, endpoint := range endpoints { + return gophercloud.NormalizeURL(endpoint.URL), nil + } + + // Report an error if there were no matching endpoints. 
+ err := &gophercloud.ErrEndpointNotFound{} + return "", err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go new file mode 100644 index 000000000000..df410b1c6114 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/errors.go @@ -0,0 +1,71 @@ +package openstack + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" +) + +// ErrEndpointNotFound is the error when no suitable endpoint can be found +// in the user's catalog +type ErrEndpointNotFound struct{ gophercloud.BaseError } + +func (e ErrEndpointNotFound) Error() string { + return "No suitable endpoint could be found in the service catalog." +} + +// ErrInvalidAvailabilityProvided is the error when an invalid endpoint +// availability is provided +type ErrInvalidAvailabilityProvided struct{ gophercloud.ErrInvalidInput } + +func (e ErrInvalidAvailabilityProvided) Error() string { + return fmt.Sprintf("Unexpected availability in endpoint query: %s", e.Value) +} + +// ErrMultipleMatchingEndpointsV2 is the error when more than one endpoint +// for the given options is found in the v2 catalog +type ErrMultipleMatchingEndpointsV2 struct { + gophercloud.BaseError + Endpoints []tokens2.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV2) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrMultipleMatchingEndpointsV3 is the error when more than one endpoint +// for the given options is found in the v3 catalog +type ErrMultipleMatchingEndpointsV3 struct { + gophercloud.BaseError + Endpoints []tokens3.Endpoint +} + +func (e ErrMultipleMatchingEndpointsV3) Error() string { + return fmt.Sprintf("Discovered %d matching endpoints: %#v", len(e.Endpoints), e.Endpoints) +} + +// ErrNoAuthURL is the error when the OS_AUTH_URL environment variable is not +// found +type ErrNoAuthURL struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoAuthURL) Error() string { + return "Environment variable OS_AUTH_URL needs to be set." +} + +// ErrNoUsername is the error when the OS_USERNAME environment variable is not +// found +type ErrNoUsername struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoUsername) Error() string { + return "Environment variable OS_USERNAME needs to be set." +} + +// ErrNoPassword is the error when the OS_PASSWORD environment variable is not +// found +type ErrNoPassword struct{ gophercloud.ErrInvalidInput } + +func (e ErrNoPassword) Error() string { + return "Environment variable OS_PASSWORD needs to be set." +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/doc.go new file mode 100644 index 000000000000..6aee4eae5651 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/doc.go @@ -0,0 +1,56 @@ +/* +Package roles provides functionality to interact with and control roles on +the API. + +A role represents a personality that a user can assume when performing a +specific set of operations. If a role includes a set of rights and +privileges, a user assuming that role inherits those rights and privileges. + +When a token is generated, the list of roles that user can assume is returned +back to them. 
Services that are being called by that user determine how they +interpret the set of roles a user has and to which operations or resources +each role grants access. + +It is up to individual services such as Compute or Image to assign meaning +to these roles. As far as the Identity service is concerned, a role is an +arbitrary name assigned by the user. + +Example to List Roles + + allPages, err := roles.List(identityClient).AllPages() + if err != nil { + panic(err) + } + + allRoles, err := roles.ExtractRoles(allPages) + if err != nil { + panic(err) + } + + for _, role := range allRoles { + fmt.Printf("%+v\n", role) + } + +Example to Grant a Role to a User + + tenantID := "a99e9b4e620e4db09a2dfb6e42a01e66" + userID := "9df1a02f5eb2416a9781e8b0c022d3ae" + roleID := "9fe2ff9ee4384b1894a90878d3e92bab" + + err := roles.AddUser(identityClient, tenantID, userID, roleID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Remove a Role from a User + + tenantID := "a99e9b4e620e4db09a2dfb6e42a01e66" + userID := "9df1a02f5eb2416a9781e8b0c022d3ae" + roleID := "9fe2ff9ee4384b1894a90878d3e92bab" + + err := roles.DeleteUser(identityClient, tenantID, userID, roleID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package roles diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go new file mode 100644 index 000000000000..50228c906656 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/requests.go @@ -0,0 +1,32 @@ +package roles + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List is the operation responsible for listing all available global roles +// that a user can adopt. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, rootURL(client), func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.SinglePageBase(r)} + }) +} + +// AddUser is the operation responsible for assigning a particular role to +// a user. This is confined to the scope of the user's tenant - so the tenant +// ID is a required argument. +func AddUser(client *gophercloud.ServiceClient, tenantID, userID, roleID string) (r UserRoleResult) { + _, r.Err = client.Put(userRoleURL(client, tenantID, userID, roleID), nil, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// DeleteUser is the operation responsible for deleting a particular role +// from a user. This is confined to the scope of the user's tenant - so the +// tenant ID is a required argument. +func DeleteUser(client *gophercloud.ServiceClient, tenantID, userID, roleID string) (r UserRoleResult) { + _, r.Err = client.Delete(userRoleURL(client, tenantID, userID, roleID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go new file mode 100644 index 000000000000..94eccd6fedcc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/results.go @@ -0,0 +1,48 @@ +package roles + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Role represents an API role resource. +type Role struct { + // ID is the unique ID for the role. 
+ ID string + + // Name is the human-readable name of the role. + Name string + + // Description is the description of the role. + Description string + + // ServiceID is the associated service for this role. + ServiceID string +} + +// RolePage is a single page of a user Role collection. +type RolePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Roles contains any results. +func (r RolePage) IsEmpty() (bool, error) { + users, err := ExtractRoles(r) + return len(users) == 0, err +} + +// ExtractRoles returns a slice of roles contained in a single page of results. +func ExtractRoles(r pagination.Page) ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := (r.(RolePage)).ExtractInto(&s) + return s.Roles, err +} + +// UserRoleResult represents the result of either an AddUserRole or +// a DeleteUserRole operation. Call its ExtractErr method to determine +// if the request succeeded or failed. +type UserRoleResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/doc.go new file mode 100644 index 000000000000..f4c5dab5b7da --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/doc.go @@ -0,0 +1,2 @@ +// roles unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/fixtures.go new file mode 100644 index 000000000000..498c1611d014 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/fixtures.go @@ -0,0 +1,48 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/OS-KSADM/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "roles": [ + { + "id": "123", + "name": "compute:admin", + "description": "Nova Administrator" + } + ] +} + `) + }) +} + +func MockAddUserRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusCreated) + }) +} + +func MockDeleteUserRoleResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/{tenant_id}/users/{user_id}/roles/OS-KSADM/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/requests_test.go new file mode 100644 index 000000000000..8cf539557c98 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/testing/requests_test.go @@ -0,0 +1,65 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListRoleResponse(t) + + count := 0 + + err := roles.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := roles.ExtractRoles(page) + if err != nil { + t.Errorf("Failed to extract roles: %v", err) + return false, err + } + + expected := []roles.Role{ + { + ID: "123", + Name: "compute:admin", + Description: "Nova Administrator", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestAddUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockAddUserRoleResponse(t) + + err := roles.AddUser(client.ServiceClient(), "{tenant_id}", "{user_id}", "{role_id}").ExtractErr() + + th.AssertNoErr(t, err) +} + +func TestDeleteUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteUserRoleResponse(t) + + err := roles.DeleteUser(client.ServiceClient(), "{tenant_id}", "{user_id}", "{role_id}").ExtractErr() + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go new file mode 100644 index 000000000000..e4661e8bf1f3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/admin/roles/urls.go @@ -0,0 +1,21 @@ +package roles + +import "github.com/gophercloud/gophercloud" + +const ( + ExtPath = "OS-KSADM" + RolePath = "roles" + UserPath = "users" +) + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(ExtPath, RolePath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(ExtPath, RolePath) +} + +func userRoleURL(c *gophercloud.ServiceClient, tenantID, userID, roleID string) string { + return c.ServiceURL("tenants", tenantID, UserPath, userID, RolePath, ExtPath, roleID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/delegate.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/delegate.go new file mode 100644 index 000000000000..cf6cc816da8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/delegate.go @@ -0,0 +1,46 @@ +package extensions + +import ( + "github.com/gophercloud/gophercloud" + common "github.com/gophercloud/gophercloud/openstack/common/extensions" + "github.com/gophercloud/gophercloud/pagination" +) + +// ExtensionPage is a single page of Extension results. +type ExtensionPage struct { + common.ExtensionPage +} + +// IsEmpty returns true if the current page contains no Extensions. +func (page ExtensionPage) IsEmpty() (bool, error) { + is, err := ExtractExtensions(page) + return len(is) == 0, err +} + +// ExtractExtensions accepts a Page struct, specifically an ExtensionPage struct, and extracts the +// elements into a slice of Extension structs. 
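+//
+// A typical call sequence, shown here only as an illustrative sketch (the
+// identityClient variable is a placeholder for a configured ServiceClient):
+//
+//	allPages, err := extensions.List(identityClient).AllPages()
+//	if err != nil {
+//		// handle the error
+//	}
+//	allExtensions, err := extensions.ExtractExtensions(allPages)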
+func ExtractExtensions(page pagination.Page) ([]common.Extension, error) { + // Identity v2 adds an intermediate "values" object. + var s struct { + Extensions struct { + Values []common.Extension `json:"values"` + } `json:"extensions"` + } + err := page.(ExtensionPage).ExtractInto(&s) + return s.Extensions.Values, err +} + +// Get retrieves information for a specific extension using its alias. +func Get(c *gophercloud.ServiceClient, alias string) common.GetResult { + return common.Get(c, alias) +} + +// List returns a Pager which allows you to iterate over the full collection of extensions. +// It does not accept query parameters. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return common.List(c).WithPageCreator(func(r pagination.PageResult) pagination.Page { + return ExtensionPage{ + ExtensionPage: common.ExtensionPage{SinglePageBase: pagination.SinglePageBase(r)}, + } + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/doc.go new file mode 100644 index 000000000000..791e4e391da3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/doc.go @@ -0,0 +1,3 @@ +// Package extensions provides information and interaction with the +// different extensions available for the OpenStack Identity service. +package extensions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/delegate_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/delegate_test.go new file mode 100644 index 000000000000..e7869d8d871e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/delegate_test.go @@ -0,0 +1,39 @@ +package testing + +import ( + "testing" + + common "github.com/gophercloud/gophercloud/openstack/common/extensions/testing" + "github.com/gophercloud/gophercloud/openstack/identity/v2/extensions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListExtensionsSuccessfully(t) + + count := 0 + err := extensions.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := extensions.ExtractExtensions(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, common.ExpectedExtensions, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + common.HandleGetExtensionSuccessfully(t) + + actual, err := extensions.Get(client.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, common.SingleExtension, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/doc.go new file mode 100644 index 000000000000..3c5d45926323 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/doc.go @@ -0,0 +1,2 @@ +// extensions unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/fixtures.go new 
file mode 100644 index 000000000000..60afb747b6a6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/extensions/testing/fixtures.go @@ -0,0 +1,58 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single Extension result. It differs from the delegated implementation +// by the introduction of an intermediate "values" member. +const ListOutput = ` +{ + "extensions": { + "values": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] + } +} +` + +// HandleListExtensionsSuccessfully creates an HTTP handler that returns ListOutput for a List +// call. +func HandleListExtensionsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ` +{ + "extensions": { + "values": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] + } +} + `) + }) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go new file mode 100644 index 000000000000..45623369e18e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/doc.go @@ -0,0 +1,65 @@ +/* +Package tenants provides information and interaction with the +tenants API resource for the OpenStack Identity service. + +See http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 +and http://developer.openstack.org/api-ref-identity-v2.html#admin-tenants +for more information. 
+ +Example to List Tenants + + listOpts := tenants.ListOpts{ + Limit: 2, + } + + allPages, err := tenants.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allTenants, err := tenants.ExtractTenants(allPages) + if err != nil { + panic(err) + } + + for _, tenant := range allTenants { + fmt.Printf("%+v\n", tenant) + } + +Example to Create a Tenant + + createOpts := tenants.CreateOpts{ + Name: "tenant_name", + Description: "this is a tenant", + Enabled: gophercloud.Enabled, + } + + tenant, err := tenants.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Tenant + + tenantID := "e6db6ed6277c461a853458589063b295" + + updateOpts := tenants.UpdateOpts{ + Description: "this is a new description", + Enabled: gophercloud.Disabled, + } + + tenant, err := tenants.Update(identityClient, tenantID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Tenant + + tenantID := "e6db6ed6277c461a853458589063b295" + + err := tenants.Delete(identityClient, tenantID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package tenants diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go new file mode 100644 index 000000000000..60f58c8ce392 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/requests.go @@ -0,0 +1,116 @@ +package tenants + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts filters the Tenants that are returned by the List call. +type ListOpts struct { + // Marker is the ID of the last Tenant on the previous page. + Marker string `q:"marker"` + + // Limit specifies the page size. + Limit int `q:"limit"` +} + +// List enumerates the Tenants to which the current token has access. +func List(client *gophercloud.ServiceClient, opts *ListOpts) pagination.Pager { + url := listURL(client) + if opts != nil { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return pagination.Pager{Err: err} + } + url += q.String() + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return TenantPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOpts represents the options needed when creating a new tenant. +type CreateOpts struct { + // Name is the name of the tenant. + Name string `json:"name" required:"true"` + + // Description is the description of the tenant. + Description string `json:"description,omitempty"` + + // Enabled sets the tenant status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` +} + +// CreateOptsBuilder enables extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToTenantCreateMap() (map[string]interface{}, error) +} + +// ToTenantCreateMap assembles a request body based on the contents of +// a CreateOpts. +func (opts CreateOpts) ToTenantCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "tenant") +} + +// Create is the operation responsible for creating a new tenant. 
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToTenantCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get requests details on a single tenant by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToTenantUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an existing +// tenant. +type UpdateOpts struct { + // Name is the name of the tenant. + Name string `json:"name,omitempty"` + + // Description is the description of the tenant. + Description string `json:"description,omitempty"` + + // Enabled sets the tenant status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` +} + +// ToTenantUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToTenantUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "tenant") +} + +// Update is the operation responsible for updating existing tenants by their TenantID. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToTenantUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete is the operation responsible for permanently deleting a tenant. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go new file mode 100644 index 000000000000..bb6c2c6b08a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/results.go @@ -0,0 +1,91 @@ +package tenants + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Tenant is a grouping of users in the identity service. +type Tenant struct { + // ID is a unique identifier for this tenant. + ID string `json:"id"` + + // Name is a friendlier user-facing name for this tenant. + Name string `json:"name"` + + // Description is a human-readable explanation of this Tenant's purpose. + Description string `json:"description"` + + // Enabled indicates whether or not a tenant is active. + Enabled bool `json:"enabled"` +} + +// TenantPage is a single page of Tenant results. +type TenantPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Tenants contains any results. +func (r TenantPage) IsEmpty() (bool, error) { + tenants, err := ExtractTenants(r) + return len(tenants) == 0, err +} + +// NextPageURL extracts the "next" link from the tenants_links section of the result. 
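+//
+// For orientation only (an illustrative shape, not a payload captured from
+// this change), the section of the list response consulted here looks
+// roughly like:
+//
+//	"tenants_links": [
+//		{"href": "https://identity.example.com/v2.0/tenants?limit=2&marker=1234", "rel": "next"}
+//	]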
+func (r TenantPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"tenants_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// ExtractTenants returns a slice of Tenants contained in a single page of +// results. +func ExtractTenants(r pagination.Page) ([]Tenant, error) { + var s struct { + Tenants []Tenant `json:"tenants"` + } + err := (r.(TenantPage)).ExtractInto(&s) + return s.Tenants, err +} + +type tenantResult struct { + gophercloud.Result +} + +// Extract interprets any tenantResults as a Tenant. +func (r tenantResult) Extract() (*Tenant, error) { + var s struct { + Tenant *Tenant `json:"tenant"` + } + err := r.ExtractInto(&s) + return s.Tenant, err +} + +// GetResult is the response from a Get request. Call its Extract method to +// interpret it as a Tenant. +type GetResult struct { + tenantResult +} + +// CreateResult is the response from a Create request. Call its Extract method +// to interpret it as a Tenant. +type CreateResult struct { + tenantResult +} + +// DeleteResult is the response from a Delete request. Call its ExtractErr method +// to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the response from an Update request. Call its Extract method +// to interpret it as a Tenant. +type UpdateResult struct { + tenantResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/doc.go new file mode 100644 index 000000000000..c08b8c37e036 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/doc.go @@ -0,0 +1,2 @@ +// tenants unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/fixtures.go new file mode 100644 index 000000000000..9a314704faaa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/fixtures.go @@ -0,0 +1,155 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Tenant results. +const ListOutput = ` +{ + "tenants": [ + { + "id": "1234", + "name": "Red Team", + "description": "The team that is red", + "enabled": true + }, + { + "id": "9876", + "name": "Blue Team", + "description": "The team that is blue", + "enabled": false + } + ] +} +` + +// RedTeam is a Tenant fixture. +var RedTeam = tenants.Tenant{ + ID: "1234", + Name: "Red Team", + Description: "The team that is red", + Enabled: true, +} + +// BlueTeam is a Tenant fixture. +var BlueTeam = tenants.Tenant{ + ID: "9876", + Name: "Blue Team", + Description: "The team that is blue", + Enabled: false, +} + +// ExpectedTenantSlice is the slice of tenants expected to be returned from ListOutput. +var ExpectedTenantSlice = []tenants.Tenant{RedTeam, BlueTeam} + +// HandleListTenantsSuccessfully creates an HTTP handler at `/tenants` on the test handler mux that +// responds with a list of two tenants. 
+func HandleListTenantsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/tenants", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +func mockCreateTenantResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "tenant": { + "name": "new_tenant", + "description": "This is new tenant", + "enabled": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "tenant": { + "name": "new_tenant", + "description": "This is new tenant", + "enabled": true, + "id": "5c62ef576dc7444cbb73b1fe84b97648" + } +} +`) + }) +} + +func mockDeleteTenantResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/2466f69cd4714d89a548a68ed97ffcd4", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func mockUpdateTenantResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/5c62ef576dc7444cbb73b1fe84b97648", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "tenant": { + "name": "new_name", + "description": "This is new name", + "enabled": true + } +} +`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "tenant": { + "name": "new_name", + "description": "This is new name", + "enabled": true, + "id": "5c62ef576dc7444cbb73b1fe84b97648" + } +} +`) + }) +} + +func mockGetTenantResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/5c62ef576dc7444cbb73b1fe84b97648", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "tenant": { + "name": "new_tenant", + "description": "This is new tenant", + "enabled": true, + "id": "5c62ef576dc7444cbb73b1fe84b97648" + } +} +`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/requests_test.go new file mode 100644 index 000000000000..86f2c94722a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/testing/requests_test.go @@ -0,0 +1,113 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListTenants(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListTenantsSuccessfully(t) + + count := 0 + err := tenants.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := tenants.ExtractTenants(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedTenantSlice, actual) + + return 
true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateTenant(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateTenantResponse(t) + + opts := tenants.CreateOpts{ + Name: "new_tenant", + Description: "This is new tenant", + Enabled: gophercloud.Enabled, + } + + tenant, err := tenants.Create(client.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + + expected := &tenants.Tenant{ + Name: "new_tenant", + Description: "This is new tenant", + Enabled: true, + ID: "5c62ef576dc7444cbb73b1fe84b97648", + } + + th.AssertDeepEquals(t, expected, tenant) +} + +func TestDeleteTenant(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteTenantResponse(t) + + err := tenants.Delete(client.ServiceClient(), "2466f69cd4714d89a548a68ed97ffcd4").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateTenant(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateTenantResponse(t) + + id := "5c62ef576dc7444cbb73b1fe84b97648" + opts := tenants.UpdateOpts{ + Name: "new_name", + Description: "This is new name", + Enabled: gophercloud.Enabled, + } + + tenant, err := tenants.Update(client.ServiceClient(), id, opts).Extract() + + th.AssertNoErr(t, err) + + expected := &tenants.Tenant{ + Name: "new_name", + ID: id, + Description: "This is new name", + Enabled: true, + } + + th.AssertDeepEquals(t, expected, tenant) +} + +func TestGetTenant(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetTenantResponse(t) + + tenant, err := tenants.Get(client.ServiceClient(), "5c62ef576dc7444cbb73b1fe84b97648").Extract() + th.AssertNoErr(t, err) + + expected := &tenants.Tenant{ + Name: "new_tenant", + ID: "5c62ef576dc7444cbb73b1fe84b97648", + Description: "This is new tenant", + Enabled: true, + } + + th.AssertDeepEquals(t, expected, tenant) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go new file mode 100644 index 000000000000..0f026690790c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tenants/urls.go @@ -0,0 +1,23 @@ +package tenants + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tenants") +} + +func getURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL("tenants", tenantID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tenants") +} + +func deleteURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL("tenants", tenantID) +} + +func updateURL(client *gophercloud.ServiceClient, tenantID string) string { + return client.ServiceURL("tenants", tenantID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go new file mode 100644 index 000000000000..5375eea87267 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/doc.go @@ -0,0 +1,46 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. 
+ +For more information, see: +http://developer.openstack.org/api-ref-identity-v2.html#identity-auth-v2 + +Example to Create an Unscoped Token from a Password + + authOpts := gophercloud.AuthOptions{ + Username: "user", + Password: "pass" + } + + token, err := tokens.Create(identityClient, authOpts).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Tenant ID and Password + + authOpts := gophercloud.AuthOptions{ + Username: "user", + Password: "password", + TenantID: "fc394f2ab2df4114bde39905f800dc57" + } + + token, err := tokens.Create(identityClient, authOpts).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Tenant Name and Password + + authOpts := gophercloud.AuthOptions{ + Username: "user", + Password: "password", + TenantName: "tenantname" + } + + token, err := tokens.Create(identityClient, authOpts).ExtractToken() + if err != nil { + panic(err) + } +*/ +package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go new file mode 100644 index 000000000000..ab32368cc6ef --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/requests.go @@ -0,0 +1,103 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// PasswordCredentialsV2 represents the required options to authenticate +// with a username and password. +type PasswordCredentialsV2 struct { + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` +} + +// TokenCredentialsV2 represents the required options to authenticate +// with a token. +type TokenCredentialsV2 struct { + ID string `json:"id,omitempty" required:"true"` +} + +// AuthOptionsV2 wraps a gophercloud AuthOptions in order to adhere to the +// AuthOptionsBuilder interface. +type AuthOptionsV2 struct { + PasswordCredentials *PasswordCredentialsV2 `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // Some providers allow you to specify a TenantName instead of the TenantId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // TokenCredentials allows users to authenticate (possibly as another user) + // with an authentication token ID. + TokenCredentials *TokenCredentialsV2 `json:"token,omitempty" xor:"PasswordCredentials"` +} + +// AuthOptionsBuilder allows extensions to add additional parameters to the +// token create request. +type AuthOptionsBuilder interface { + // ToTokenCreateMap assembles the Create request body, returning an error + // if parameters are missing or inconsistent. + ToTokenV2CreateMap() (map[string]interface{}, error) +} + +// AuthOptions are the valid options for Openstack Identity v2 authentication. +// For field descriptions, see gophercloud.AuthOptions. +type AuthOptions struct { + IdentityEndpoint string `json:"-"` + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + AllowReauth bool `json:"-"` + TokenID string +} + +// ToTokenV2CreateMap builds a token request body from the given AuthOptions. 
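+//
+// As a rough sketch (the credential values are placeholders), username and
+// password options combined with a tenant name produce a body equivalent to:
+//
+//	{
+//		"auth": {
+//			"tenantName": "demo",
+//			"passwordCredentials": {"username": "me", "password": "secret"}
+//		}
+//	}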
+func (opts AuthOptions) ToTokenV2CreateMap() (map[string]interface{}, error) { + v2Opts := AuthOptionsV2{ + TenantID: opts.TenantID, + TenantName: opts.TenantName, + } + + if opts.Password != "" { + v2Opts.PasswordCredentials = &PasswordCredentialsV2{ + Username: opts.Username, + Password: opts.Password, + } + } else { + v2Opts.TokenCredentials = &TokenCredentialsV2{ + ID: opts.TokenID, + } + } + + b, err := gophercloud.BuildRequestBody(v2Opts, "auth") + if err != nil { + return nil, err + } + return b, nil +} + +// Create authenticates to the identity service and attempts to acquire a Token. +// Generally, rather than interact with this call directly, end users should +// call openstack.AuthenticatedClient(), which abstracts all of the gory details +// about navigating service catalogs and such. +func Create(client *gophercloud.ServiceClient, auth AuthOptionsBuilder) (r CreateResult) { + b, err := auth.ToTokenV2CreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(CreateURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + return +} + +// Get validates and retrieves information for user's token. +func Get(client *gophercloud.ServiceClient, token string) (r GetResult) { + _, r.Err = client.Get(GetURL(client, token), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 203}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go new file mode 100644 index 000000000000..b11326772b12 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/results.go @@ -0,0 +1,159 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" +) + +// Token provides only the most basic information related to an authentication +// token. +type Token struct { + // ID provides the primary means of identifying a user to the OpenStack API. + // OpenStack defines this field as an opaque value, so do not depend on its + // content. It is safe, however, to compare for equality. + ID string + + // ExpiresAt provides a timestamp in ISO 8601 format, indicating when the + // authentication token becomes invalid. After this point in time, future + // API requests made using this authentication token will respond with + // errors. Either the caller will need to reauthenticate manually, or more + // preferably, the caller should exploit automatic re-authentication. + // See the AuthOptions structure for more details. + ExpiresAt time.Time + + // Tenant provides information about the tenant to which this token grants + // access. + Tenant tenants.Tenant +} + +// Role is a role for a user. +type Role struct { + Name string `json:"name"` +} + +// User is an OpenStack user. +type User struct { + ID string `json:"id"` + Name string `json:"name"` + UserName string `json:"username"` + Roles []Role `json:"roles"` +} + +// Endpoint represents a single API endpoint offered by a service. +// It provides the public and internal URLs, if supported, along with a region +// specifier, again if provided. +// +// The significance of the Region field will depend upon your provider. 
+// +// In addition, the interface offered by the service will have version +// information associated with it through the VersionId, VersionInfo, and +// VersionList fields, if provided or supported. +// +// In all cases, fields which aren't supported by the provider and service +// combined will assume a zero-value (""). +type Endpoint struct { + TenantID string `json:"tenantId"` + PublicURL string `json:"publicURL"` + InternalURL string `json:"internalURL"` + AdminURL string `json:"adminURL"` + Region string `json:"region"` + VersionID string `json:"versionId"` + VersionInfo string `json:"versionInfo"` + VersionList string `json:"versionList"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V2 service +// catalog listing. +// +// Each class of service, such as cloud DNS or block storage services, will have +// a single CatalogEntry representing it. +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. +type CatalogEntry struct { + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may assign + // their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry +} + +// CreateResult is the response from a Create request. Use ExtractToken() to +// interpret it as a Token, or ExtractServiceCatalog() to interpret it as a +// service catalog. +type CreateResult struct { + gophercloud.Result +} + +// GetResult is the deferred response from a Get call, which is the same with a +// Created token. Use ExtractUser() to interpret it as a User. +type GetResult struct { + CreateResult +} + +// ExtractToken returns the just-created Token from a CreateResult. +func (r CreateResult) ExtractToken() (*Token, error) { + var s struct { + Access struct { + Token struct { + Expires string `json:"expires"` + ID string `json:"id"` + Tenant tenants.Tenant `json:"tenant"` + } `json:"token"` + } `json:"access"` + } + + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + expiresTs, err := time.Parse(gophercloud.RFC3339Milli, s.Access.Token.Expires) + if err != nil { + return nil, err + } + + return &Token{ + ID: s.Access.Token.ID, + ExpiresAt: expiresTs, + Tenant: s.Access.Token.Tenant, + }, nil +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r CreateResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s struct { + Access struct { + Entries []CatalogEntry `json:"serviceCatalog"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &ServiceCatalog{Entries: s.Access.Entries}, err +} + +// ExtractUser returns the User from a GetResult. 
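+//
+// A minimal illustrative flow (identityClient and tokenID are placeholders):
+//
+//	user, err := tokens.Get(identityClient, tokenID).ExtractUser()
+//	if err != nil {
+//		// handle the error
+//	}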
+func (r GetResult) ExtractUser() (*User, error) { + var s struct { + Access struct { + User User `json:"user"` + } `json:"access"` + } + err := r.ExtractInto(&s) + return &s.Access.User, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/doc.go new file mode 100644 index 000000000000..a7955a717ef3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/doc.go @@ -0,0 +1,2 @@ +// tokens unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/fixtures.go new file mode 100644 index 000000000000..0a8544ba8641 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/fixtures.go @@ -0,0 +1,194 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/identity/v2/tenants" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + th "github.com/gophercloud/gophercloud/testhelper" + thclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ExpectedToken is the token that should be parsed from TokenCreationResponse. +var ExpectedToken = &tokens.Token{ + ID: "aaaabbbbccccdddd", + ExpiresAt: time.Date(2014, time.January, 31, 15, 30, 58, 0, time.UTC), + Tenant: tenants.Tenant{ + ID: "fc394f2ab2df4114bde39905f800dc57", + Name: "test", + Description: "There are many tenants. This one is yours.", + Enabled: true, + }, +} + +// ExpectedServiceCatalog is the service catalog that should be parsed from TokenCreationResponse. +var ExpectedServiceCatalog = &tokens.ServiceCatalog{ + Entries: []tokens.CatalogEntry{ + { + Name: "inscrutablewalrus", + Type: "something", + Endpoints: []tokens.Endpoint{ + { + PublicURL: "http://something0:1234/v2/", + Region: "region0", + }, + { + PublicURL: "http://something1:1234/v2/", + Region: "region1", + }, + }, + }, + { + Name: "arbitrarypenguin", + Type: "else", + Endpoints: []tokens.Endpoint{ + { + PublicURL: "http://else0:4321/v3/", + Region: "region0", + }, + }, + }, + }, +} + +// ExpectedUser is the user that should be parsed from TokenGetResponse. +var ExpectedUser = &tokens.User{ + ID: "a530fefc3d594c4ba2693a4ecd6be74e", + Name: "apiserver", + Roles: []tokens.Role{tokens.Role{Name: "member"}, tokens.Role{Name: "service"}}, + UserName: "apiserver", +} + +// TokenCreationResponse is a JSON response that contains ExpectedToken and ExpectedServiceCatalog. +const TokenCreationResponse = ` +{ + "access": { + "token": { + "issued_at": "2014-01-30T15:30:58.000000Z", + "expires": "2014-01-31T15:30:58Z", + "id": "aaaabbbbccccdddd", + "tenant": { + "description": "There are many tenants. This one is yours.", + "enabled": true, + "id": "fc394f2ab2df4114bde39905f800dc57", + "name": "test" + } + }, + "serviceCatalog": [ + { + "endpoints": [ + { + "publicURL": "http://something0:1234/v2/", + "region": "region0" + }, + { + "publicURL": "http://something1:1234/v2/", + "region": "region1" + } + ], + "type": "something", + "name": "inscrutablewalrus" + }, + { + "endpoints": [ + { + "publicURL": "http://else0:4321/v3/", + "region": "region0" + } + ], + "type": "else", + "name": "arbitrarypenguin" + } + ] + } +} +` + +// TokenGetResponse is a JSON response that contains ExpectedToken and ExpectedUser. 
+const TokenGetResponse = ` +{ + "access": { + "token": { + "issued_at": "2014-01-30T15:30:58.000000Z", + "expires": "2014-01-31T15:30:58Z", + "id": "aaaabbbbccccdddd", + "tenant": { + "description": "There are many tenants. This one is yours.", + "enabled": true, + "id": "fc394f2ab2df4114bde39905f800dc57", + "name": "test" + } + }, + "serviceCatalog": [], + "user": { + "id": "a530fefc3d594c4ba2693a4ecd6be74e", + "name": "apiserver", + "roles": [ + { + "name": "member" + }, + { + "name": "service" + } + ], + "roles_links": [], + "username": "apiserver" + } + } +}` + +// HandleTokenPost expects a POST against a /tokens handler, ensures that the request body has been +// constructed properly given certain auth options, and returns the result. +func HandleTokenPost(t *testing.T, requestJSON string) { + th.Mux.HandleFunc("/tokens", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + if requestJSON != "" { + th.TestJSONRequest(t, r, requestJSON) + } + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, TokenCreationResponse) + }) +} + +// HandleTokenGet expects a Get against a /tokens handler, ensures that the request body has been +// constructed properly given certain auth options, and returns the result. +func HandleTokenGet(t *testing.T, token string) { + th.Mux.HandleFunc("/tokens/"+token, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", thclient.TokenID) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, TokenGetResponse) + }) +} + +// IsSuccessful ensures that a CreateResult was successful and contains the correct token and +// service catalog. +func IsSuccessful(t *testing.T, result tokens.CreateResult) { + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedToken, token) + + serviceCatalog, err := result.ExtractServiceCatalog() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedServiceCatalog, serviceCatalog) +} + +// GetIsSuccessful ensures that a GetResult was successful and contains the correct token and +// User Info. 
+func GetIsSuccessful(t *testing.T, result tokens.GetResult) { + token, err := result.ExtractToken() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedToken, token) + + user, err := result.ExtractUser() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedUser, user) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/requests_test.go new file mode 100644 index 000000000000..b687a929e043 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/testing/requests_test.go @@ -0,0 +1,104 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func tokenPost(t *testing.T, options gophercloud.AuthOptions, requestJSON string) tokens.CreateResult { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleTokenPost(t, requestJSON) + + return tokens.Create(client.ServiceClient(), options) +} + +func tokenPostErr(t *testing.T, options gophercloud.AuthOptions, expectedErr error) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleTokenPost(t, "") + + actualErr := tokens.Create(client.ServiceClient(), options).Err + th.CheckDeepEquals(t, expectedErr, actualErr) +} + +func TestCreateWithPassword(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "swordfish", + } + + IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "passwordCredentials": { + "username": "me", + "password": "swordfish" + } + } + } + `)) +} + +func TestCreateTokenWithTenantID(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "opensesame", + TenantID: "fc394f2ab2df4114bde39905f800dc57", + } + + IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "tenantId": "fc394f2ab2df4114bde39905f800dc57", + "passwordCredentials": { + "username": "me", + "password": "opensesame" + } + } + } + `)) +} + +func TestCreateTokenWithTenantName(t *testing.T) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "opensesame", + TenantName: "demo", + } + + IsSuccessful(t, tokenPost(t, options, ` + { + "auth": { + "tenantName": "demo", + "passwordCredentials": { + "username": "me", + "password": "opensesame" + } + } + } + `)) +} + +func TestRequireUsername(t *testing.T) { + options := gophercloud.AuthOptions{ + Password: "thing", + } + + tokenPostErr(t, options, gophercloud.ErrMissingInput{Argument: "Username"}) +} + +func tokenGet(t *testing.T, tokenId string) tokens.GetResult { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleTokenGet(t, tokenId) + return tokens.Get(client.ServiceClient(), tokenId) +} + +func TestGetWithToken(t *testing.T) { + GetIsSuccessful(t, tokenGet(t, "db22caf43c934e6c829087c41ff8d8d6")) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go new file mode 100644 index 000000000000..ee0a28f2004c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/tokens/urls.go @@ -0,0 +1,13 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// CreateURL generates the URL used to create new Tokens. 
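+//
+// For example, given a service client whose endpoint is
+// "https://identity.example.com/v2.0/" (an illustrative value), CreateURL
+// would return "https://identity.example.com/v2.0/tokens".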
+func CreateURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("tokens") +} + +// GetURL generates the URL used to Validate Tokens. +func GetURL(client *gophercloud.ServiceClient, token string) string { + return client.ServiceURL("tokens", token) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/doc.go new file mode 100644 index 000000000000..b72466a137a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/doc.go @@ -0,0 +1,75 @@ +/* +Package users provides information and interaction with the users API +resource for the OpenStack Identity Service. + +Example to List Users + + allPages, err := users.List(identityClient).AllPages() + if err != nil { + panic(err) + } + + allUsers, err := users.ExtractUsers(allPages) + if err != nil { + panic(err) + } + + for _, user := range allUsers { + fmt.Printf("%+v\n", user) + } + +Example to Create a User + + createOpts := users.CreateOpts{ + Name: "name", + TenantID: "c39e3de9be2d4c779f1dfd6abacc176d", + Enabled: gophercloud.Enabled, + } + + user, err := users.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a User + + userID := "9fe2ff9ee4384b1894a90878d3e92bab" + + updateOpts := users.UpdateOpts{ + Name: "new_name", + Enabled: gophercloud.Disabled, + } + + user, err := users.Update(identityClient, userID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a User + + userID := "9fe2ff9ee4384b1894a90878d3e92bab" + err := users.Delete(identityClient, userID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List a User's Roles + + tenantID := "1d8b6120dcc640fda4fc9194ffc80273" + userID := "c39e3de9be2d4c779f1dfd6abacc176d" + + allPages, err := users.ListRoles(identityClient, tenantID, userID).AllPages() + if err != nil { + panic(err) + } + + allRoles, err := users.ExtractRoles(allPages) + if err != nil { + panic(err) + } + + for _, role := range allRoles { + fmt.Printf("%+v\n", role) + } +*/ +package users diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/requests.go new file mode 100644 index 000000000000..0081e8a9f210 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/requests.go @@ -0,0 +1,113 @@ +package users + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List lists the existing users. +func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, rootURL(client), func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.SinglePageBase(r)} + }) +} + +// CommonOpts are the parameters that are shared between CreateOpts and +// UpdateOpts +type CommonOpts struct { + // Either a name or username is required. When provided, the value must be + // unique or a 409 conflict error will be returned. If you provide a name but + // omit a username, the latter will be set to the former; and vice versa. + Name string `json:"name,omitempty"` + Username string `json:"username,omitempty"` + + // TenantID is the ID of the tenant to which you want to assign this user. + TenantID string `json:"tenantId,omitempty"` + + // Enabled indicates whether this user is enabled or not. 
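+	//
+	// The gophercloud.Enabled and gophercloud.Disabled convenience pointers,
+	// used in this package's documentation and tests, can be assigned here
+	// directly, for example:
+	//
+	//	users.CreateOpts{Name: "new_user", Enabled: gophercloud.Disabled}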
+ Enabled *bool `json:"enabled,omitempty"` + + // Email is the email address of this user. + Email string `json:"email,omitempty"` +} + +// CreateOpts represents the options needed when creating new users. +type CreateOpts CommonOpts + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// ToUserCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) { + if opts.Name == "" && opts.Username == "" { + err := gophercloud.ErrMissingInput{} + err.Argument = "users.CreateOpts.Name/users.CreateOpts.Username" + err.Info = "Either a Name or Username must be provided" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "user") +} + +// Create is the operation responsible for creating new users. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToUserCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(rootURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Get requests details on a single user, either by ID or Name. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(ResourceURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToUserUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts specifies the base attributes that may be updated on an +// existing server. +type UpdateOpts CommonOpts + +// ToUserUpdateMap formats an UpdateOpts structure into a request body. +func (opts UpdateOpts) ToUserUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "user") +} + +// Update is the operation responsible for updating exist users by their ID. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToUserUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(ResourceURL(client, id), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete is the operation responsible for permanently deleting a User. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(ResourceURL(client, id), nil) + return +} + +// ListRoles lists the existing roles that can be assigned to users. +func ListRoles(client *gophercloud.ServiceClient, tenantID, userID string) pagination.Pager { + return pagination.NewPager(client, listRolesURL(client, tenantID, userID), func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/results.go new file mode 100644 index 000000000000..9f62eee08551 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/results.go @@ -0,0 +1,114 @@ +package users + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// User represents a user resource that exists on the API. +type User struct { + // ID is the UUID for this user. 
+ ID string + + // Name is the human name for this user. + Name string + + // Username is the username for this user. + Username string + + // Enabled indicates whether the user is enabled (true) or disabled (false). + Enabled bool + + // Email is the email address for this user. + Email string + + // TenantID is the ID of the tenant to which this user belongs. + TenantID string `json:"tenant_id"` +} + +// Role assigns specific responsibilities to users, allowing them to accomplish +// certain API operations whilst scoped to a service. +type Role struct { + // ID is the UUID of the role. + ID string + + // Name is the name of the role. + Name string +} + +// UserPage is a single page of a User collection. +type UserPage struct { + pagination.SinglePageBase +} + +// RolePage is a single page of a user Role collection. +type RolePage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a page of Users contains any results. +func (r UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(r) + return len(users) == 0, err +} + +// ExtractUsers returns a slice of Users contained in a single page of results. +func ExtractUsers(r pagination.Page) ([]User, error) { + var s struct { + Users []User `json:"users"` + } + err := (r.(UserPage)).ExtractInto(&s) + return s.Users, err +} + +// IsEmpty determines whether or not a page of Roles contains any results. +func (r RolePage) IsEmpty() (bool, error) { + users, err := ExtractRoles(r) + return len(users) == 0, err +} + +// ExtractRoles returns a slice of Roles contained in a single page of results. +func ExtractRoles(r pagination.Page) ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := (r.(RolePage)).ExtractInto(&s) + return s.Roles, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets any commonResult as a User, if possible. +func (r commonResult) Extract() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret the result as a User. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. Call its Extract method +// to interpret the result as a User. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret the result as a User. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
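+//
+// A minimal sketch, mirroring this package's tests (identityClient and the
+// user ID are illustrative values):
+//
+//	res := users.Delete(identityClient, "9fe2ff9ee4384b1894a90878d3e92bab")
+//	if res.Err != nil {
+//		// handle the error
+//	}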
+type DeleteResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/doc.go new file mode 100644 index 000000000000..4519dcad7aaa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/doc.go @@ -0,0 +1,2 @@ +// users unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/fixtures.go new file mode 100644 index 000000000000..8626da2af644 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/fixtures.go @@ -0,0 +1,163 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "users":[ + { + "id": "u1000", + "name": "John Smith", + "username": "jqsmith", + "email": "john.smith@example.org", + "enabled": true, + "tenant_id": "12345" + }, + { + "id": "u1001", + "name": "Jane Smith", + "username": "jqsmith", + "email": "jane.smith@example.org", + "enabled": true, + "tenant_id": "12345" + } + ] +} + `) + }) +} + +func mockCreateUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "user": { + "name": "new_user", + "tenantId": "12345", + "enabled": false, + "email": "new_user@foo.com" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "name": "new_user", + "tenant_id": "12345", + "enabled": false, + "email": "new_user@foo.com", + "id": "c39e3de9be2d4c779f1dfd6abacc176d" + } +} +`) + }) +} + +func mockGetUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users/new_user", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "name": "new_user", + "tenant_id": "12345", + "enabled": false, + "email": "new_user@foo.com", + "id": "c39e3de9be2d4c779f1dfd6abacc176d" + } +} +`) + }) +} + +func mockUpdateUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users/c39e3de9be2d4c779f1dfd6abacc176d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + th.TestJSONRequest(t, r, ` +{ + "user": { + "name": "new_name", + "enabled": true, + "email": "new_email@foo.com" + } +} +`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "user": { + "name": "new_name", + "tenant_id": "12345", + "enabled": true, + "email": "new_email@foo.com", + "id": "c39e3de9be2d4c779f1dfd6abacc176d" + } +} +`) + }) +} + +func mockDeleteUserResponse(t *testing.T) { + th.Mux.HandleFunc("/users/c39e3de9be2d4c779f1dfd6abacc176d", func(w 
http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func mockListRolesResponse(t *testing.T) { + th.Mux.HandleFunc("/tenants/1d8b6120dcc640fda4fc9194ffc80273/users/c39e3de9be2d4c779f1dfd6abacc176d/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "roles": [ + { + "id": "9fe2ff9ee4384b1894a90878d3e92bab", + "name": "foo_role" + }, + { + "id": "1ea3d56793574b668e85960fbf651e13", + "name": "admin" + } + ] +} + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/requests_test.go new file mode 100644 index 000000000000..3cb047e2dad5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/testing/requests_test.go @@ -0,0 +1,161 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v2/users" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListUserResponse(t) + + count := 0 + + err := users.List(client.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := users.ExtractUsers(page) + th.AssertNoErr(t, err) + + expected := []users.User{ + { + ID: "u1000", + Name: "John Smith", + Username: "jqsmith", + Email: "john.smith@example.org", + Enabled: true, + TenantID: "12345", + }, + { + ID: "u1001", + Name: "Jane Smith", + Username: "jqsmith", + Email: "jane.smith@example.org", + Enabled: true, + TenantID: "12345", + }, + } + th.CheckDeepEquals(t, expected, actual) + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, count) +} + +func TestCreateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockCreateUserResponse(t) + + opts := users.CreateOpts{ + Name: "new_user", + TenantID: "12345", + Enabled: gophercloud.Disabled, + Email: "new_user@foo.com", + } + + user, err := users.Create(client.ServiceClient(), opts).Extract() + + th.AssertNoErr(t, err) + + expected := &users.User{ + Name: "new_user", + ID: "c39e3de9be2d4c779f1dfd6abacc176d", + Email: "new_user@foo.com", + Enabled: false, + TenantID: "12345", + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestGetUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockGetUserResponse(t) + + user, err := users.Get(client.ServiceClient(), "new_user").Extract() + th.AssertNoErr(t, err) + + expected := &users.User{ + Name: "new_user", + ID: "c39e3de9be2d4c779f1dfd6abacc176d", + Email: "new_user@foo.com", + Enabled: false, + TenantID: "12345", + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestUpdateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockUpdateUserResponse(t) + + id := "c39e3de9be2d4c779f1dfd6abacc176d" + opts := users.UpdateOpts{ + Name: "new_name", + Enabled: gophercloud.Enabled, + Email: "new_email@foo.com", + } + + user, err := users.Update(client.ServiceClient(), id, opts).Extract() + + th.AssertNoErr(t, err) + + expected := &users.User{ + 
Name: "new_name", + ID: id, + Email: "new_email@foo.com", + Enabled: true, + TenantID: "12345", + } + + th.AssertDeepEquals(t, expected, user) +} + +func TestDeleteUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockDeleteUserResponse(t) + + res := users.Delete(client.ServiceClient(), "c39e3de9be2d4c779f1dfd6abacc176d") + th.AssertNoErr(t, res.Err) +} + +func TestListingUserRoles(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + mockListRolesResponse(t) + + tenantID := "1d8b6120dcc640fda4fc9194ffc80273" + userID := "c39e3de9be2d4c779f1dfd6abacc176d" + + err := users.ListRoles(client.ServiceClient(), tenantID, userID).EachPage(func(page pagination.Page) (bool, error) { + actual, err := users.ExtractRoles(page) + th.AssertNoErr(t, err) + + expected := []users.Role{ + {ID: "9fe2ff9ee4384b1894a90878d3e92bab", Name: "foo_role"}, + {ID: "1ea3d56793574b668e85960fbf651e13", Name: "admin"}, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/urls.go new file mode 100644 index 000000000000..89f66f2799aa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v2/users/urls.go @@ -0,0 +1,21 @@ +package users + +import "github.com/gophercloud/gophercloud" + +const ( + tenantPath = "tenants" + userPath = "users" + rolePath = "roles" +) + +func ResourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(userPath, id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(userPath) +} + +func listRolesURL(c *gophercloud.ServiceClient, tenantID, userID string) string { + return c.ServiceURL(tenantPath, tenantID, userPath, userID, rolePath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/doc.go new file mode 100644 index 000000000000..720db782f7a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/doc.go @@ -0,0 +1,59 @@ +/* +Package domains manages and retrieves Domains in the OpenStack Identity Service. 
+ +Example to List Domains + + var iTrue bool = true + listOpts := domains.ListOpts{ + Enabled: &iTrue, + } + + allPages, err := domains.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allDomains, err := domains.ExtractDomains(allPages) + if err != nil { + panic(err) + } + + for _, domain := range allDomains { + fmt.Printf("%+v\n", domain) + } + +Example to Create a Domain + + createOpts := domains.CreateOpts{ + Name: "domain name", + Description: "Test domain", + } + + domain, err := domains.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Domain + + domainID := "0fe36e73809d46aeae6705c39077b1b3" + + var iFalse bool = false + updateOpts := domains.UpdateOpts{ + Enabled: &iFalse, + } + + domain, err := domains.Update(identityClient, domainID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Domain + + domainID := "0fe36e73809d46aeae6705c39077b1b3" + err := domains.Delete(identityClient, domainID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package domains diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/requests.go new file mode 100644 index 000000000000..14fbd27eb331 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/requests.go @@ -0,0 +1,126 @@ +package domains + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToDomainListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // Enabled filters the response by enabled domains. + Enabled *bool `q:"enabled"` + + // Name filters the response by domain name. + Name string `q:"name"` +} + +// ToDomainListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToDomainListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates the domains to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToDomainListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return DomainPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single domain, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToDomainCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a domain. +type CreateOpts struct { + // Name is the name of the new domain. + Name string `json:"name" required:"true"` + + // Description is a description of the domain. + Description string `json:"description,omitempty"` + + // Enabled sets the domain status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` +} + +// ToDomainCreateMap formats a CreateOpts into a create request. 
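+//
+// For example, CreateOpts{Name: "domain two"} produces a request body
+// equivalent to:
+//
+//	{"domain": {"name": "domain two"}}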
+func (opts CreateOpts) ToDomainCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "domain") +} + +// Create creates a new Domain. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToDomainCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// Delete deletes a domain. +func Delete(client *gophercloud.ServiceClient, domainID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, domainID), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToDomainUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents parameters to update a domain. +type UpdateOpts struct { + // Name is the name of the domain. + Name string `json:"name,omitempty"` + + // Description is the description of the domain. + Description string `json:"description,omitempty"` + + // Enabled sets the domain status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` +} + +// ToUpdateCreateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToDomainUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "domain") +} + +// Update modifies the attributes of a domain. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToDomainUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/results.go new file mode 100644 index 000000000000..5b8e2662b268 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/results.go @@ -0,0 +1,97 @@ +package domains + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// A Domain is a collection of projects, users, and roles. +type Domain struct { + // Description is the description of the Domain. + Description string `json:"description"` + + // Enabled is whether or not the domain is enabled. + Enabled bool `json:"enabled"` + + // ID is the unique ID of the domain. + ID string `json:"id"` + + // Links contains referencing links to the domain. + Links map[string]interface{} `json:"links"` + + // Name is the name of the domain. + Name string `json:"name"` +} + +type domainResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Domain. +type GetResult struct { + domainResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Domain. +type CreateResult struct { + domainResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the result of an Update request. Call its Extract method to +// interpret it as a Domain. +type UpdateResult struct { + domainResult +} + +// DomainPage is a single page of Domain results. 
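+//
+// Pages are usually consumed through the pagination helpers, for example
+// (identityClient is an assumed, authenticated client):
+//
+//	err := domains.List(identityClient, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		ds, err := domains.ExtractDomains(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, d := range ds {
+//			fmt.Printf("%+v\n", d)
+//		}
+//		return true, nil
+//	})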
+type DomainPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Domains contains any results. +func (r DomainPage) IsEmpty() (bool, error) { + domains, err := ExtractDomains(r) + return len(domains) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r DomainPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractDomains returns a slice of Domains contained in a single page of +// results. +func ExtractDomains(r pagination.Page) ([]Domain, error) { + var s struct { + Domains []Domain `json:"domains"` + } + err := (r.(DomainPage)).ExtractInto(&s) + return s.Domains, err +} + +// Extract interprets any domainResults as a Domain. +func (r domainResult) Extract() (*Domain, error) { + var s struct { + Domain *Domain `json:"domain"` + } + err := r.ExtractInto(&s) + return s.Domain, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/testing/fixtures.go new file mode 100644 index 000000000000..87ac561b5a73 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/testing/fixtures.go @@ -0,0 +1,188 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/domains" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Domain results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/domains" + }, + "domains": [ + { + "enabled": true, + "id": "2844b2a08be147a08ef58317d6471f1f", + "links": { + "self": "http://example.com/identity/v3/domains/2844b2a08be147a08ef58317d6471f1f" + }, + "name": "domain one", + "description": "some description" + }, + { + "enabled": true, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/domains/9fe1d3" + }, + "name": "domain two" + } + ] +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "domain": { + "enabled": true, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/domains/9fe1d3" + }, + "name": "domain two" + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "domain": { + "name": "domain two" + } +} +` + +// UpdateRequest provides the input to as Update request. +const UpdateRequest = ` +{ + "domain": { + "description": "Staging Domain" + } +} +` + +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "domain": { + "enabled": true, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/domains/9fe1d3" + }, + "name": "domain two", + "description": "Staging Domain" + } +} +` + +// FirstDomain is the first domain in the List request. +var FirstDomain = domains.Domain{ + Enabled: true, + ID: "2844b2a08be147a08ef58317d6471f1f", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/domains/2844b2a08be147a08ef58317d6471f1f", + }, + Name: "domain one", + Description: "some description", +} + +// SecondDomain is the second domain in the List request. 
+var SecondDomain = domains.Domain{ + Enabled: true, + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/domains/9fe1d3", + }, + Name: "domain two", +} + +// SecondDomainUpdated is how SecondDomain should look after an Update. +var SecondDomainUpdated = domains.Domain{ + Enabled: true, + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/domains/9fe1d3", + }, + Name: "domain two", + Description: "Staging Domain", +} + +// ExpectedDomainsSlice is the slice of domains expected to be returned from ListOutput. +var ExpectedDomainsSlice = []domains.Domain{FirstDomain, SecondDomain} + +// HandleListDomainsSuccessfully creates an HTTP handler at `/domains` on the +// test handler mux that responds with a list of two domains. +func HandleListDomainsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/domains", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetDomainSuccessfully creates an HTTP handler at `/domains` on the +// test handler mux that responds with a single domain. +func HandleGetDomainSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/domains/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateDomainSuccessfully creates an HTTP handler at `/domains` on the +// test handler mux that tests domain creation. +func HandleCreateDomainSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/domains", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleDeleteDomainSuccessfully creates an HTTP handler at `/domains` on the +// test handler mux that tests domain deletion. +func HandleDeleteDomainSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/domains/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateDomainSuccessfully creates an HTTP handler at `/domains` on the +// test handler mux that tests domain update. 
+func HandleUpdateDomainSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/domains/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/testing/requests_test.go new file mode 100644 index 000000000000..73ac97aa7371 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/testing/requests_test.go @@ -0,0 +1,89 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/domains" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListDomains(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListDomainsSuccessfully(t) + + count := 0 + err := domains.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := domains.ExtractDomains(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedDomainsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListDomainsAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListDomainsSuccessfully(t) + + allPages, err := domains.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := domains.ExtractDomains(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedDomainsSlice, actual) +} + +func TestGetDomain(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetDomainSuccessfully(t) + + actual, err := domains.Get(client.ServiceClient(), "9fe1d3").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondDomain, *actual) +} + +func TestCreateDomain(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateDomainSuccessfully(t) + + createOpts := domains.CreateOpts{ + Name: "domain two", + } + + actual, err := domains.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondDomain, *actual) +} + +func TestDeleteDomain(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteDomainSuccessfully(t) + + res := domains.Delete(client.ServiceClient(), "9fe1d3") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateDomain(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateDomainSuccessfully(t) + + updateOpts := domains.UpdateOpts{ + Description: "Staging Domain", + } + + actual, err := domains.Update(client.ServiceClient(), "9fe1d3", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondDomainUpdated, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/urls.go new file mode 100644 index 000000000000..b0c21b80be70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/domains/urls.go @@ -0,0 +1,23 @@ +package domains + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("domains") +} + +func getURL(client 
*gophercloud.ServiceClient, domainID string) string { + return client.ServiceURL("domains", domainID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("domains") +} + +func deleteURL(client *gophercloud.ServiceClient, domainID string) string { + return client.ServiceURL("domains", domainID) +} + +func updateURL(client *gophercloud.ServiceClient, domainID string) string { + return client.ServiceURL("domains", domainID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/doc.go new file mode 100644 index 000000000000..5822017c9014 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/doc.go @@ -0,0 +1,69 @@ +/* +Package endpoints provides information and interaction with the service +endpoints API resource in the OpenStack Identity service. + +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#endpoints-v3 + +Example to List Endpoints + + serviceID := "e629d6e599d9489fb3ae5d9cc12eaea3" + + listOpts := endpoints.ListOpts{ + ServiceID: serviceID, + } + + allPages, err := endpoints.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allEndpoints, err := endpoints.ExtractEndpoints(allPages) + if err != nil { + panic(err) + } + + for _, endpoint := range allEndpoints { + fmt.Printf("%+v\n", endpoint) + } + +Example to Create an Endpoint + + serviceID := "e629d6e599d9489fb3ae5d9cc12eaea3" + + createOpts := endpoints.CreateOpts{ + Availability: gophercloud.AvailabilityPublic, + Name: "neutron", + Region: "RegionOne", + URL: "https://localhost:9696", + ServiceID: serviceID, + } + + endpoint, err := endpoints.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + + +Example to Update an Endpoint + + endpointID := "ad59deeec5154d1fa0dcff518596f499" + + updateOpts := endpoints.UpdateOpts{ + Region: "RegionTwo", + } + + endpoint, err := endpoints.Update(identityClient, endpointID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete an Endpoint + + endpointID := "ad59deeec5154d1fa0dcff518596f499" + err := endpoints.Delete(identityClient, endpointID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package endpoints diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/requests.go new file mode 100644 index 000000000000..0764a118e2b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/requests.go @@ -0,0 +1,136 @@ +package endpoints + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type CreateOptsBuilder interface { + ToEndpointCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains the subset of Endpoint attributes that should be used +// to create an Endpoint. +type CreateOpts struct { + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `json:"interface" required:"true"` + + // Name is the name of the Endpoint. + Name string `json:"name" required:"true"` + + // Region is the region the Endpoint is located in. + // This field can be omitted or left as a blank string. 
+ Region string `json:"region,omitempty"` + + // URL is the url of the Endpoint. + URL string `json:"url" required:"true"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `json:"service_id" required:"true"` +} + +// ToEndpointCreateMap builds a request body from the Endpoint Create options. +func (opts CreateOpts) ToEndpointCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint") +} + +// Create inserts a new Endpoint into the service catalog. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToEndpointCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(listURL(client), &b, &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add parameters to the List request. +type ListOptsBuilder interface { + ToEndpointListParams() (string, error) +} + +// ListOpts allows finer control over the endpoints returned by a List call. +// All fields are optional. +type ListOpts struct { + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `q:"interface"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `q:"service_id"` + + // RegionID is the ID of the region the Endpoint refers to. + RegionID int `q:"region_id"` +} + +// ToEndpointListParams builds a list request from the List options. +func (opts ListOpts) ToEndpointListParams() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates endpoints in a paginated collection, optionally filtered +// by ListOpts criteria. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + u := listURL(client) + if opts != nil { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return pagination.Pager{Err: err} + } + u += q.String() + } + return pagination.NewPager(client, u, func(r pagination.PageResult) pagination.Page { + return EndpointPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add parameters to the Update request. +type UpdateOptsBuilder interface { + ToEndpointUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the subset of Endpoint attributes that should be used to +// update an Endpoint. +type UpdateOpts struct { + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `json:"interface,omitempty"` + + // Name is the name of the Endpoint. + Name string `json:"name,omitempty"` + + // Region is the region the Endpoint is located in. + // This field can be omitted or left as a blank string. + Region string `json:"region,omitempty"` + + // URL is the url of the Endpoint. + URL string `json:"url,omitempty"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `json:"service_id,omitempty"` +} + +// ToEndpointUpdateMap builds an update request body from the Update options. +func (opts UpdateOpts) ToEndpointUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint") +} + +// Update changes an existing endpoint with new data. 
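+//
+// A minimal sketch (identityClient and the endpoint ID are illustrative
+// values):
+//
+//	updateOpts := endpoints.UpdateOpts{Region: "RegionTwo"}
+//	endpoint, err := endpoints.Update(identityClient, "ad59deeec5154d1fa0dcff518596f499", updateOpts).Extract()
+//	if err != nil {
+//		// handle the error
+//	}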
+func Update(client *gophercloud.ServiceClient, endpointID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToEndpointUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(endpointURL(client, endpointID), &b, &r.Body, nil) + return +} + +// Delete removes an endpoint from the service catalog. +func Delete(client *gophercloud.ServiceClient, endpointID string) (r DeleteResult) { + _, r.Err = client.Delete(endpointURL(client, endpointID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/results.go new file mode 100644 index 000000000000..6e54f6b4138a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/results.go @@ -0,0 +1,80 @@ +package endpoints + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete +// Endpoint. An error is returned if the original call or the extraction failed. +func (r commonResult) Extract() (*Endpoint, error) { + var s struct { + Endpoint *Endpoint `json:"endpoint"` + } + err := r.ExtractInto(&s) + return s.Endpoint, err +} + +// CreateResult is the response from a Create operation. Call its Extract +// method to interpret it as an Endpoint. +type CreateResult struct { + commonResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as an Endpoint. +type UpdateResult struct { + commonResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Endpoint describes the entry point for another service's API. +type Endpoint struct { + // ID is the unique ID of the endpoint. + ID string `json:"id"` + + // Availability is the interface type of the Endpoint (admin, internal, + // or public), referenced by the gophercloud.Availability type. + Availability gophercloud.Availability `json:"interface"` + + // Name is the name of the Endpoint. + Name string `json:"name"` + + // Region is the region the Endpoint is located in. + Region string `json:"region"` + + // ServiceID is the ID of the service the Endpoint refers to. + ServiceID string `json:"service_id"` + + // URL is the url of the Endpoint. + URL string `json:"url"` +} + +// EndpointPage is a single page of Endpoint results. +type EndpointPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if no Endpoints were returned. +func (r EndpointPage) IsEmpty() (bool, error) { + es, err := ExtractEndpoints(r) + return len(es) == 0, err +} + +// ExtractEndpoints extracts an Endpoint slice from a Page. 
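+//
+// ExtractEndpoints is typically paired with AllPages, for example
+// (identityClient is an assumed, authenticated client):
+//
+//	allPages, err := endpoints.List(identityClient, endpoints.ListOpts{}).AllPages()
+//	if err != nil {
+//		// handle the error
+//	}
+//	allEndpoints, err := endpoints.ExtractEndpoints(allPages)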
+func ExtractEndpoints(r pagination.Page) ([]Endpoint, error) { + var s struct { + Endpoints []Endpoint `json:"endpoints"` + } + err := (r.(EndpointPage)).ExtractInto(&s) + return s.Endpoints, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/testing/doc.go new file mode 100644 index 000000000000..1370fdcc77b4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/testing/doc.go @@ -0,0 +1,2 @@ +// endpoints unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/testing/requests_test.go new file mode 100644 index 000000000000..53d8488896ba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/testing/requests_test.go @@ -0,0 +1,214 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateSuccessful(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "endpoint": { + "interface": "public", + "name": "the-endiest-of-points", + "region": "underground", + "url": "https://1.2.3.4:9000/", + "service_id": "asdfasdfasdfasdf" + } + } + `) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "endpoint": { + "id": "12", + "interface": "public", + "links": { + "self": "https://localhost:5000/v3/endpoints/12" + }, + "name": "the-endiest-of-points", + "region": "underground", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9000/" + } + } + `) + }) + + actual, err := endpoints.Create(client.ServiceClient(), endpoints.CreateOpts{ + Availability: gophercloud.AvailabilityPublic, + Name: "the-endiest-of-points", + Region: "underground", + URL: "https://1.2.3.4:9000/", + ServiceID: "asdfasdfasdfasdf", + }).Extract() + th.AssertNoErr(t, err) + + expected := &endpoints.Endpoint{ + ID: "12", + Availability: gophercloud.AvailabilityPublic, + Name: "the-endiest-of-points", + Region: "underground", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9000/", + } + + th.AssertDeepEquals(t, expected, actual) +} + +func TestListEndpoints(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/endpoints", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, ` + { + "endpoints": [ + { + "id": "12", + "interface": "public", + "links": { + "self": "https://localhost:5000/v3/endpoints/12" + }, + "name": "the-endiest-of-points", + "region": "underground", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9000/" + }, + { + "id": "13", + "interface": "internal", + "links": { + "self": "https://localhost:5000/v3/endpoints/13" + }, + "name": "shhhh", + "region": "underground", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9001/" + } 
+ ], + "links": { + "next": null, + "previous": null + } + } + `) + }) + + count := 0 + endpoints.List(client.ServiceClient(), endpoints.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := endpoints.ExtractEndpoints(page) + if err != nil { + t.Errorf("Failed to extract endpoints: %v", err) + return false, err + } + + expected := []endpoints.Endpoint{ + { + ID: "12", + Availability: gophercloud.AvailabilityPublic, + Name: "the-endiest-of-points", + Region: "underground", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9000/", + }, + { + ID: "13", + Availability: gophercloud.AvailabilityInternal, + Name: "shhhh", + Region: "underground", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9001/", + }, + } + th.AssertDeepEquals(t, expected, actual) + return true, nil + }) + th.AssertEquals(t, 1, count) +} + +func TestUpdateEndpoint(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/endpoints/12", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, ` + { + "endpoint": { + "name": "renamed", + "region": "somewhere-else" + } + } + `) + + fmt.Fprintf(w, ` + { + "endpoint": { + "id": "12", + "interface": "public", + "links": { + "self": "https://localhost:5000/v3/endpoints/12" + }, + "name": "renamed", + "region": "somewhere-else", + "service_id": "asdfasdfasdfasdf", + "url": "https://1.2.3.4:9000/" + } + } + `) + }) + + actual, err := endpoints.Update(client.ServiceClient(), "12", endpoints.UpdateOpts{ + Name: "renamed", + Region: "somewhere-else", + }).Extract() + if err != nil { + t.Fatalf("Unexpected error from Update: %v", err) + } + + expected := &endpoints.Endpoint{ + ID: "12", + Availability: gophercloud.AvailabilityPublic, + Name: "renamed", + Region: "somewhere-else", + ServiceID: "asdfasdfasdfasdf", + URL: "https://1.2.3.4:9000/", + } + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteEndpoint(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/endpoints/34", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + + res := endpoints.Delete(client.ServiceClient(), "34") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/urls.go new file mode 100644 index 000000000000..80cf57eb352e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/endpoints/urls.go @@ -0,0 +1,11 @@ +package endpoints + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("endpoints") +} + +func endpointURL(client *gophercloud.ServiceClient, endpointID string) string { + return client.ServiceURL("endpoints", endpointID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/doc.go new file mode 100644 index 000000000000..8db7724f2b0a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/doc.go @@ -0,0 +1,26 @@ +/* +Package trusts enables management of OpenStack Identity Trusts. 
+ +Example to Create a Token with Username, Password, and Trust ID + + var trustToken struct { + tokens.Token + trusts.TokenExt + } + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + } + + createOpts := trusts.AuthOptsExt{ + AuthOptionsBuilder: authOptions, + TrustID: "de0945a", + } + + err := tokens.Create(identityClient, createOpts).ExtractInto(&trustToken) + if err != nil { + panic(err) + } +*/ +package trusts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/requests.go new file mode 100644 index 000000000000..438fba61de56 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/requests.go @@ -0,0 +1,39 @@ +package trusts + +import "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + +// AuthOptsExt extends the base Identity v3 tokens AuthOpts with a TrustID. +type AuthOptsExt struct { + tokens.AuthOptionsBuilder + + // TrustID is the ID of the trust. + TrustID string `json:"id"` +} + +// ToTokenV3CreateMap builds a create request body from the AuthOpts. +func (opts AuthOptsExt) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + return opts.AuthOptionsBuilder.ToTokenV3CreateMap(scope) +} + +// ToTokenV3ScopeMap builds a scope from AuthOpts. +func (opts AuthOptsExt) ToTokenV3ScopeMap() (map[string]interface{}, error) { + b, err := opts.AuthOptionsBuilder.ToTokenV3ScopeMap() + if err != nil { + return nil, err + } + + if opts.TrustID != "" { + if b == nil { + b = make(map[string]interface{}) + } + b["OS-TRUST:trust"] = map[string]interface{}{ + "id": opts.TrustID, + } + } + + return b, nil +} + +func (opts AuthOptsExt) CanReauth() bool { + return opts.AuthOptionsBuilder.CanReauth() +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/results.go new file mode 100644 index 000000000000..e6912e612c27 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/results.go @@ -0,0 +1,27 @@ +package trusts + +// TrusteeUser represents the trusted user ID of a trust. +type TrusteeUser struct { + ID string `json:"id"` +} + +// TrustorUser represents the trusting user ID of a trust. +type TrustorUser struct { + ID string `json:"id"` +} + +// Trust represents a delegated authorization request between two +// identities. +type Trust struct { + ID string `json:"id"` + Impersonation bool `json:"impersonation"` + TrusteeUser TrusteeUser `json:"trustee_user"` + TrustorUser TrustorUser `json:"trustor_user"` + RedelegatedTrustID string `json:"redelegated_trust_id"` + RedelegationCount int `json:"redelegation_count"` +} + +// TokenExt represents an extension of the base token result. 
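// A minimal usage sketch, mirroring the package example above and assuming
// identityClient and createOpts are prepared as shown there: embedding
// TokenExt next to tokens.Token lets a single ExtractInto call fill both the
// base token fields and the trust details.
//
//	var result struct {
//		tokens.Token
//		trusts.TokenExt
//	}
//	if err := tokens.Create(identityClient, createOpts).ExtractInto(&result); err != nil {
//		panic(err)
//	}
//	fmt.Println(result.Trust.ID)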
+type TokenExt struct { + Trust Trust `json:"OS-TRUST:trust"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/doc.go new file mode 100644 index 000000000000..e9614fdccac8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/doc.go @@ -0,0 +1,2 @@ +// trusts unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/fixtures.go new file mode 100644 index 000000000000..e3115264b0bc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/fixtures.go @@ -0,0 +1,67 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/testhelper" +) + +// HandleCreateTokenWithTrustID verifies that providing certain AuthOptions and Scope results in an expected JSON structure. +func HandleCreateTokenWithTrustID(t *testing.T, options tokens.AuthOptionsBuilder, requestJSON string) { + testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "POST") + testhelper.TestHeader(t, r, "Content-Type", "application/json") + testhelper.TestHeader(t, r, "Accept", "application/json") + testhelper.TestJSONRequest(t, r, requestJSON) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "token": { + "expires_at": "2013-02-27T18:30:59.999999Z", + "issued_at": "2013-02-27T16:30:59.999999Z", + "methods": [ + "password" + ], + "OS-TRUST:trust": { + "id": "fe0aef", + "impersonation": false, + "redelegated_trust_id": "3ba234", + "redelegation_count": 2, + "links": { + "self": "http://example.com/identity/v3/trusts/fe0aef" + }, + "trustee_user": { + "id": "0ca8f6", + "links": { + "self": "http://example.com/identity/v3/users/0ca8f6" + } + }, + "trustor_user": { + "id": "bd263c", + "links": { + "self": "http://example.com/identity/v3/users/bd263c" + } + } + }, + "user": { + "domain": { + "id": "1789d1", + "links": { + "self": "http://example.com/identity/v3/domains/1789d1" + }, + "name": "example.com" + }, + "email": "joe@example.com", + "id": "0ca8f6", + "links": { + "self": "http://example.com/identity/v3/users/0ca8f6" + }, + "name": "Joe" + } + } +}`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/requests_test.go new file mode 100644 index 000000000000..f8a65adb5c95 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts/testing/requests_test.go @@ -0,0 +1,74 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts" + "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateUserIDPasswordTrustID(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + ao := trusts.AuthOptsExt{ + TrustID: "de0945a", + AuthOptionsBuilder: &tokens.AuthOptions{ + UserID: "me", + Password: "squirrel!", + 
}, + } + HandleCreateTokenWithTrustID(t, ao, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { "id": "me", "password": "squirrel!" } + } + }, + "scope": { + "OS-TRUST:trust": { + "id": "de0945a" + } + } + } + } + `) + + var actual struct { + tokens.Token + trusts.TokenExt + } + err := tokens.Create(client.ServiceClient(), ao).ExtractInto(&actual) + if err != nil { + t.Errorf("Create returned an error: %v", err) + } + expected := struct { + tokens.Token + trusts.TokenExt + }{ + tokens.Token{ + ExpiresAt: time.Date(2013, 02, 27, 18, 30, 59, 999999000, time.UTC), + }, + trusts.TokenExt{ + Trust: trusts.Trust{ + ID: "fe0aef", + Impersonation: false, + TrusteeUser: trusts.TrusteeUser{ + ID: "0ca8f6", + }, + TrustorUser: trusts.TrustorUser{ + ID: "bd263c", + }, + RedelegatedTrustID: "3ba234", + RedelegationCount: 2, + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go new file mode 100644 index 000000000000..696e2a5d8e53 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/doc.go @@ -0,0 +1,60 @@ +/* +Package groups manages and retrieves Groups in the OpenStack Identity Service. + +Example to List Groups + + listOpts := groups.ListOpts{ + DomainID: "default", + } + + allPages, err := groups.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := groups.ExtractGroups(allPages) + if err != nil { + panic(err) + } + + for _, group := range allGroups { + fmt.Printf("%+v\n", group) + } + +Example to Create a Group + + createOpts := groups.CreateOpts{ + Name: "groupname", + DomainID: "default", + Extra: map[string]interface{}{ + "email": "groupname@example.com", + } + } + + group, err := groups.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Group + + groupID := "0fe36e73809d46aeae6705c39077b1b3" + + updateOpts := groups.UpdateOpts{ + Description: "Updated Description for group", + } + + group, err := groups.Update(identityClient, groupID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Group + + groupID := "0fe36e73809d46aeae6705c39077b1b3" + err := groups.Delete(identityClient, groupID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package groups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/errors.go new file mode 100644 index 000000000000..98e6fe4b0e53 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/errors.go @@ -0,0 +1,17 @@ +package groups + +import "fmt" + +// InvalidListFilter is returned by the ToUserListQuery method when validation of +// a filter does not pass +type InvalidListFilter struct { + FilterName string +} + +func (e InvalidListFilter) Error() string { + s := fmt.Sprintf( + "Invalid filter name [%s]: it must be in format of NAME__COMPARATOR", + e.FilterName, + ) + return s +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go new file mode 100644 index 000000000000..cc991204cfa1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/requests.go @@ -0,0 +1,180 @@ +package 
groups + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToGroupListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // DomainID filters the response by a domain ID. + DomainID string `q:"domain_id"` + + // Name filters the response by group name. + Name string `q:"name"` + + // Filters filters the response by custom filters such as + // 'name__contains=foo' + Filters map[string]string `q:"-"` +} + +// ToGroupListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the Groups to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToGroupListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return GroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single group, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a group. +type CreateOpts struct { + // Name is the name of the new group. + Name string `json:"name" required:"true"` + + // Description is a description of the group. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the group belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the group. + Extra map[string]interface{} `json:"-"` +} + +// ToGroupCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToGroupCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "group") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["group"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Group. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a group. 
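// A minimal sketch of the NAME__COMPARATOR filter convention validated by
// ToGroupListQuery above, assuming identityClient is a configured
// *gophercloud.ServiceClient: a Filters key without "__" causes List to
// return a Pager whose Err is an InvalidListFilter.
//
//	listOpts := groups.ListOpts{
//		DomainID: "default",
//		Filters:  map[string]string{"name__contains": "support"},
//	}
//	pager := groups.List(identityClient, listOpts)
//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
//		gs, err := groups.ExtractGroups(page)
//		if err != nil {
//			return false, err
//		}
//		fmt.Printf("%+v\n", gs)
//		return true, nil
//	})
//	if err != nil {
//		panic(err) // e.g. groups.InvalidListFilter for a malformed key
//	}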
+type UpdateOpts struct { + // Name is the name of the new group. + Name string `json:"name,omitempty"` + + // Description is a description of the group. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the group belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the group. + Extra map[string]interface{} `json:"-"` +} + +// ToGroupUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToGroupUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "group") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["group"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Group. +func Update(client *gophercloud.ServiceClient, groupID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToGroupUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, groupID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a group. +func Delete(client *gophercloud.ServiceClient, groupID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, groupID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go new file mode 100644 index 000000000000..ba7d018d1739 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/results.go @@ -0,0 +1,132 @@ +package groups + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Group helps manage related users. +type Group struct { + // Description describes the group purpose. + Description string `json:"description"` + + // DomainID is the domain ID the group belongs to. + DomainID string `json:"domain_id"` + + // ID is the unique ID of the group. + ID string `json:"id"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` + + // Links contains referencing links to the group. + Links map[string]interface{} `json:"links"` + + // Name is the name of the group. + Name string `json:"name"` +} + +func (r *Group) UnmarshalJSON(b []byte) error { + type tmp Group + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Group(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Group{}, resultMap) + } + } + + return err +} + +type groupResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Group. +type GetResult struct { + groupResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Group. 
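// A minimal sketch, assuming identityClient is a configured
// *gophercloud.ServiceClient: keys supplied via CreateOpts.Extra are
// flattened into the "group" request body by ToGroupCreateMap, and
// unrecognized keys in the response are gathered back into Group.Extra by
// UnmarshalJSON, so custom attributes survive a create/extract round trip.
//
//	group, err := groups.Create(identityClient, groups.CreateOpts{
//		Name:     "support",
//		DomainID: "default",
//		Extra:    map[string]interface{}{"email": "support@example.com"},
//	}).Extract()
//	if err != nil {
//		panic(err)
//	}
//	fmt.Println(group.Extra["email"]) // "support@example.com"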
+type CreateResult struct { + groupResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Group. +type UpdateResult struct { + groupResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GroupPage is a single page of Group results. +type GroupPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Groups contains any results. +func (r GroupPage) IsEmpty() (bool, error) { + groups, err := ExtractGroups(r) + return len(groups) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r GroupPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractGroups returns a slice of Groups contained in a single page of results. +func ExtractGroups(r pagination.Page) ([]Group, error) { + var s struct { + Groups []Group `json:"groups"` + } + err := (r.(GroupPage)).ExtractInto(&s) + return s.Groups, err +} + +// Extract interprets any group results as a Group. +func (r groupResult) Extract() (*Group, error) { + var s struct { + Group *Group `json:"group"` + } + err := r.ExtractInto(&s) + return s.Group, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/testing/fixtures.go new file mode 100644 index 000000000000..58f3503785e4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/testing/fixtures.go @@ -0,0 +1,216 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Group results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/groups" + }, + "groups": [ + { + "domain_id": "default", + "id": "2844b2a08be147a08ef58317d6471f1f", + "description": "group for internal support users", + "links": { + "self": "http://example.com/identity/v3/groups/2844b2a08be147a08ef58317d6471f1f" + }, + "name": "internal support", + "extra": { + "email": "support@localhost" + } + }, + { + "domain_id": "1789d1", + "id": "9fe1d3", + "description": "group for support users", + "links": { + "self": "https://example.com/identity/v3/groups/9fe1d3" + }, + "name": "support", + "extra": { + "email": "support@example.com" + } + } + ] +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "group": { + "domain_id": "1789d1", + "id": "9fe1d3", + "description": "group for support users", + "links": { + "self": "https://example.com/identity/v3/groups/9fe1d3" + }, + "name": "support", + "extra": { + "email": "support@example.com" + } + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "group": { + "domain_id": "1789d1", + "name": "support", + "description": "group for support users", + "email": "support@example.com" + } +} +` + +// UpdateRequest provides the input to as Update request. 
+const UpdateRequest = ` +{ + "group": { + "description": "L2 Support Team", + "email": "supportteam@example.com" + } +} +` + +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "group": { + "domain_id": "1789d1", + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/groups/9fe1d3" + }, + "name": "support", + "description": "L2 Support Team", + "extra": { + "email": "supportteam@example.com" + } + } +} +` + +// FirstGroup is the first group in the List request. +var FirstGroup = groups.Group{ + DomainID: "default", + ID: "2844b2a08be147a08ef58317d6471f1f", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/groups/2844b2a08be147a08ef58317d6471f1f", + }, + Name: "internal support", + Description: "group for internal support users", + Extra: map[string]interface{}{ + "email": "support@localhost", + }, +} + +// SecondGroup is the second group in the List request. +var SecondGroup = groups.Group{ + DomainID: "1789d1", + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/groups/9fe1d3", + }, + Name: "support", + Description: "group for support users", + Extra: map[string]interface{}{ + "email": "support@example.com", + }, +} + +// SecondGroupUpdated is how SecondGroup should look after an Update. +var SecondGroupUpdated = groups.Group{ + DomainID: "1789d1", + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/groups/9fe1d3", + }, + Name: "support", + Description: "L2 Support Team", + Extra: map[string]interface{}{ + "email": "supportteam@example.com", + }, +} + +// ExpectedGroupsSlice is the slice of groups expected to be returned from ListOutput. +var ExpectedGroupsSlice = []groups.Group{FirstGroup, SecondGroup} + +// HandleListGroupsSuccessfully creates an HTTP handler at `/groups` on the +// test handler mux that responds with a list of two groups. +func HandleListGroupsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetGroupSuccessfully creates an HTTP handler at `/groups` on the +// test handler mux that responds with a single group. +func HandleGetGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateGroupSuccessfully creates an HTTP handler at `/groups` on the +// test handler mux that tests group creation. +func HandleCreateGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleUpdateGroupSuccessfully creates an HTTP handler at `/groups` on the +// test handler mux that tests group update. 
+func HandleUpdateGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }) +} + +// HandleDeleteGroupSuccessfully creates an HTTP handler at `/groups` on the +// test handler mux that tests group deletion. +func HandleDeleteGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/testing/requests_test.go new file mode 100644 index 000000000000..b62779a9ad70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/testing/requests_test.go @@ -0,0 +1,133 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListGroupsSuccessfully(t) + + count := 0 + err := groups.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := groups.ExtractGroups(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedGroupsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListGroupsAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListGroupsSuccessfully(t) + + allPages, err := groups.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := groups.ExtractGroups(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedGroupsSlice, actual) + th.AssertEquals(t, ExpectedGroupsSlice[0].Extra["email"], "support@localhost") + th.AssertEquals(t, ExpectedGroupsSlice[1].Extra["email"], "support@example.com") +} + +func TestListGroupsFiltersCheck(t *testing.T) { + type test struct { + filterName string + wantErr bool + } + tests := []test{ + {"foo__contains", false}, + {"foo", true}, + {"foo_contains", true}, + {"foo__", true}, + {"__foo", true}, + } + + var listOpts groups.ListOpts + for _, _test := range tests { + listOpts.Filters = map[string]string{_test.filterName: "bar"} + _, err := listOpts.ToGroupListQuery() + + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case groups.InvalidListFilter: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestGetGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetGroupSuccessfully(t) + + actual, err := groups.Get(client.ServiceClient(), "9fe1d3").Extract() + + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondGroup, *actual) + th.AssertEquals(t, SecondGroup.Extra["email"], "support@example.com") +} + +func TestCreateGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateGroupSuccessfully(t) + + createOpts := groups.CreateOpts{ + Name: "support", + DomainID: 
"1789d1", + Description: "group for support users", + Extra: map[string]interface{}{ + "email": "support@example.com", + }, + } + + actual, err := groups.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondGroup, *actual) +} + +func TestUpdateGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateGroupSuccessfully(t) + + updateOpts := groups.UpdateOpts{ + Description: "L2 Support Team", + Extra: map[string]interface{}{ + "email": "supportteam@example.com", + }, + } + + actual, err := groups.Update(client.ServiceClient(), "9fe1d3", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondGroupUpdated, *actual) +} + +func TestDeleteGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteGroupSuccessfully(t) + + res := groups.Delete(client.ServiceClient(), "9fe1d3") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go new file mode 100644 index 000000000000..e7d1e53b27fa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/groups/urls.go @@ -0,0 +1,23 @@ +package groups + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("groups") +} + +func getURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("groups") +} + +func updateURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} + +func deleteURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/doc.go new file mode 100644 index 000000000000..0bdc7a68510e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/doc.go @@ -0,0 +1,77 @@ +/* +Package policies provides information and interaction with the policies API +resource for the OpenStack Identity service. 
+ +Example to List Policies + + listOpts := policies.ListOpts{ + Type: "application/json", + } + + allPages, err := policies.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPolicies, err := policies.ExtractPolicies(allPages) + if err != nil { + panic(err) + } + + for _, policy := range allPolicies { + fmt.Printf("%+v\n", policy) + } + +Example to Create a Policy + + createOpts := policies.CreateOpts{ + Type: "application/json", + Blob: []byte("{'foobar_user': 'role:compute-user'}"), + Extra: map[string]interface{}{ + "description": "policy for foobar_user", + }, + } + + policy, err := policies.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Get a Policy + + policyID := "0fe36e73809d46aeae6705c39077b1b3" + policy, err := policies.Get(identityClient, policyID).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", policy) + +Example to Update a Policy + + policyID := "0fe36e73809d46aeae6705c39077b1b3" + + updateOpts := policies.UpdateOpts{ + Type: "application/json", + Blob: []byte("{'foobar_user': 'role:compute-user'}"), + Extra: map[string]interface{}{ + "description": "policy for foobar_user", + }, + } + + policy, err := policies.Update(identityClient, policyID, updateOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", policy) + +Example to Delete a Policy + + policyID := "0fe36e73809d46aeae6705c39077b1b3" + err := policies.Delete(identityClient, policyID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package policies diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/errors.go new file mode 100644 index 000000000000..27fb4b1cb095 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/errors.go @@ -0,0 +1,31 @@ +package policies + +import "fmt" + +// InvalidListFilter is returned by the ToPolicyListQuery method when +// validation of a filter does not pass +type InvalidListFilter struct { + FilterName string +} + +func (e InvalidListFilter) Error() string { + s := fmt.Sprintf( + "Invalid filter name [%s]: it must be in format of TYPE__COMPARATOR", + e.FilterName, + ) + return s +} + +// StringFieldLengthExceedsLimit is returned by the +// ToPolicyCreateMap/ToPolicyUpdateMap methods when validation of +// a type does not pass +type StringFieldLengthExceedsLimit struct { + Field string + Limit int +} + +func (e StringFieldLengthExceedsLimit) Error() string { + return fmt.Sprintf("String length of field [%s] exceeds limit (%d)", + e.Field, e.Limit, + ) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/requests.go new file mode 100644 index 000000000000..c9640b2278cc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/requests.go @@ -0,0 +1,195 @@ +package policies + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +const policyTypeMaxLength = 255 + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. 
+type ListOpts struct { + // Type filters the response by MIME media type + // of the serialized policy blob. + Type string `q:"type"` + + // Filters filters the response by custom filters such as + // 'type__contains=foo' + Filters map[string]string `q:"-"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the policies to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a policy. +type CreateOpts struct { + // Type is the MIME media type of the serialized policy blob. + Type string `json:"type" required:"true"` + + // Blob is the policy rule as a serialized blob. + Blob []byte `json:"-" required:"true"` + + // Extra is free-form extra key/value pairs to describe the policy. + Extra map[string]interface{} `json:"-"` +} + +// ToPolicyCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) { + if len(opts.Type) > policyTypeMaxLength { + return nil, StringFieldLengthExceedsLimit{ + Field: "type", + Limit: policyTypeMaxLength, + } + } + + b, err := gophercloud.BuildRequestBody(opts, "policy") + if err != nil { + return nil, err + } + + if v, ok := b["policy"].(map[string]interface{}); ok { + v["blob"] = string(opts.Blob) + + if opts.Extra != nil { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Policy. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// Get retrieves details on a single policy, by ID. +func Get(client *gophercloud.ServiceClient, policyID string) (r GetResult) { + _, r.Err = client.Get(getURL(client, policyID), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a policy. +type UpdateOpts struct { + // Type is the MIME media type of the serialized policy blob. + Type string `json:"type,omitempty"` + + // Blob is the policy rule as a serialized blob. + Blob []byte `json:"-"` + + // Extra is free-form extra key/value pairs to describe the policy. 
+ Extra map[string]interface{} `json:"-"` +} + +// ToPolicyUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) { + if len(opts.Type) > policyTypeMaxLength { + return nil, StringFieldLengthExceedsLimit{ + Field: "type", + Limit: policyTypeMaxLength, + } + } + + b, err := gophercloud.BuildRequestBody(opts, "policy") + if err != nil { + return nil, err + } + + if v, ok := b["policy"].(map[string]interface{}); ok { + if len(opts.Blob) != 0 { + v["blob"] = string(opts.Blob) + } + + if opts.Extra != nil { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Role. +func Update(client *gophercloud.ServiceClient, policyID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPolicyUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, policyID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a policy. +func Delete(client *gophercloud.ServiceClient, policyID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, policyID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/results.go new file mode 100644 index 000000000000..131a9f810351 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/results.go @@ -0,0 +1,131 @@ +package policies + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Policy is an arbitrarily serialized policy engine rule +// set to be consumed by a remote service. +type Policy struct { + // ID is the unique ID of the policy. + ID string `json:"id"` + + // Blob is the policy rule as a serialized blob. + Blob string `json:"blob"` + + // Type is the MIME media type of the serialized policy blob. + Type string `json:"type"` + + // Links contains referencing links to the policy. + Links map[string]interface{} `json:"links"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` +} + +func (r *Policy) UnmarshalJSON(b []byte) error { + type tmp Policy + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Policy(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Policy{}, resultMap) + } + } + + return err +} + +type policyResult struct { + gophercloud.Result +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Policy +type CreateResult struct { + policyResult +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Policy. +type GetResult struct { + policyResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Policy. 
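// A minimal sketch of the 255-character Type limit that ToPolicyCreateMap and
// ToPolicyUpdateMap in requests.go enforce via policyTypeMaxLength; the opts
// values here are illustrative only.
//
//	opts := policies.CreateOpts{
//		Type: strings.Repeat("x", 300), // exceeds the 255-character limit
//		Blob: []byte("{'foobar_user': 'role:compute-user'}"),
//	}
//	if _, err := opts.ToPolicyCreateMap(); err != nil {
//		_, ok := err.(policies.StringFieldLengthExceedsLimit)
//		fmt.Println(ok) // true
//	}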
+type UpdateResult struct { + policyResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// PolicyPage is a single page of Policy results. +type PolicyPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Policies contains any results. +func (r PolicyPage) IsEmpty() (bool, error) { + policies, err := ExtractPolicies(r) + return len(policies) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r PolicyPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractPolicies returns a slice of Policies +// contained in a single page of results. +func ExtractPolicies(r pagination.Page) ([]Policy, error) { + var s struct { + Policies []Policy `json:"policies"` + } + err := (r.(PolicyPage)).ExtractInto(&s) + return s.Policies, err +} + +// Extract interprets any policyResults as a Policy. +func (r policyResult) Extract() (*Policy, error) { + var s struct { + Policy *Policy `json:"policy"` + } + err := r.ExtractInto(&s) + return s.Policy, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/doc.go new file mode 100644 index 000000000000..80554d4418e3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/doc.go @@ -0,0 +1,2 @@ +// Package testing contains policies unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/fixtures.go new file mode 100644 index 000000000000..24bca7e17501 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/fixtures.go @@ -0,0 +1,229 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/policies" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Policy results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/policies" + }, + "policies": [ + { + "type": "text/plain", + "id": "2844b2a08be147a08ef58317d6471f1f", + "links": { + "self": "http://example.com/identity/v3/policies/2844b2a08be147a08ef58317d6471f1f" + }, + "blob": "'foo_user': 'role:compute-user'" + }, + { + "type": "application/json", + "id": "b49884da9d31494ea02aff38d4b4e701", + "links": { + "self": "http://example.com/identity/v3/policies/b49884da9d31494ea02aff38d4b4e701" + }, + "blob": "{'bar_user': 'role:network-user'}", + "description": "policy for bar_user" + } + ] +} +` + +// ListWithFilterOutput provides a single page of filtered Policy results. 
+const ListWithFilterOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/policies" + }, + "policies": [ + { + "type": "application/json", + "id": "b49884da9d31494ea02aff38d4b4e701", + "links": { + "self": "http://example.com/identity/v3/policies/b49884da9d31494ea02aff38d4b4e701" + }, + "blob": "{'bar_user': 'role:network-user'}", + "description": "policy for bar_user" + } + ] +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "policy": { + "type": "application/json", + "id": "b49884da9d31494ea02aff38d4b4e701", + "links": { + "self": "http://example.com/identity/v3/policies/b49884da9d31494ea02aff38d4b4e701" + }, + "blob": "{'bar_user': 'role:network-user'}", + "description": "policy for bar_user" + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "policy": { + "blob": "{'bar_user': 'role:network-user'}", + "description": "policy for bar_user", + "type": "application/json" + } +} +` + +// UpdateRequest provides the input to as Update request. +const UpdateRequest = ` +{ + "policy": { + "description": "updated policy for bar_user" + } +} +` + +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "policy": { + "type": "application/json", + "id": "b49884da9d31494ea02aff38d4b4e701", + "links": { + "self": "http://example.com/identity/v3/policies/b49884da9d31494ea02aff38d4b4e701" + }, + "blob": "{'bar_user': 'role:network-user'}", + "description": "updated policy for bar_user" + } +} +` + +// FirstPolicy is the first policy in the List request. +var FirstPolicy = policies.Policy{ + ID: "2844b2a08be147a08ef58317d6471f1f", + Blob: "'foo_user': 'role:compute-user'", + Type: "text/plain", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/policies/2844b2a08be147a08ef58317d6471f1f", + }, + Extra: map[string]interface{}{}, +} + +// SecondPolicy is the second policy in the List request. +var SecondPolicy = policies.Policy{ + ID: "b49884da9d31494ea02aff38d4b4e701", + Blob: "{'bar_user': 'role:network-user'}", + Type: "application/json", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/policies/b49884da9d31494ea02aff38d4b4e701", + }, + Extra: map[string]interface{}{ + "description": "policy for bar_user", + }, +} + +// SecondPolicyUpdated is the policy in the Update request. +var SecondPolicyUpdated = policies.Policy{ + ID: "b49884da9d31494ea02aff38d4b4e701", + Blob: "{'bar_user': 'role:network-user'}", + Type: "application/json", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/policies/b49884da9d31494ea02aff38d4b4e701", + }, + Extra: map[string]interface{}{ + "description": "updated policy for bar_user", + }, +} + +// ExpectedPoliciesSlice is the slice of policies expected to be returned from ListOutput. +var ExpectedPoliciesSlice = []policies.Policy{FirstPolicy, SecondPolicy} + +// HandleListPoliciesSuccessfully creates an HTTP handler at `/policies` on the +// test handler mux that responds with a list of two policies. 
+func HandleListPoliciesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + switch r.URL.Query().Get("type") { + case "": + fmt.Fprintf(w, ListOutput) + case "application/json": + fmt.Fprintf(w, ListWithFilterOutput) + default: + w.WriteHeader(http.StatusBadRequest) + } + }) +} + +// HandleCreatePolicySuccessfully creates an HTTP handler at `/policies` on the +// test handler mux that tests policy creation. +func HandleCreatePolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleGetPolicySuccessfully creates an HTTP handler at `/policies` on the +// test handler mux that responds with a single policy. +func HandleGetPolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/policies/b49884da9d31494ea02aff38d4b4e701", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }, + ) +} + +// HandleUpdatePolicySuccessfully creates an HTTP handler at `/policies` on the +// test handler mux that tests role update. +func HandleUpdatePolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/policies/b49884da9d31494ea02aff38d4b4e701", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }, + ) +} + +// HandleDeletePolicySuccessfully creates an HTTP handler at `/policies` on the +// test handler mux that tests policy deletion. 
+func HandleDeletePolicySuccessfully(t *testing.T) { + th.Mux.HandleFunc("/policies/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/requests_test.go new file mode 100644 index 000000000000..6348fd57f172 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/testing/requests_test.go @@ -0,0 +1,229 @@ +package testing + +import ( + "fmt" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/policies" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListPolicies(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListPoliciesSuccessfully(t) + + count := 0 + err := policies.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := policies.ExtractPolicies(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedPoliciesSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListPoliciesAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListPoliciesSuccessfully(t) + + allPages, err := policies.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedPoliciesSlice, actual) +} + +func TestListPoliciesWithFilter(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListPoliciesSuccessfully(t) + + listOpts := policies.ListOpts{ + Type: "application/json", + } + allPages, err := policies.List(client.ServiceClient(), listOpts).AllPages() + th.AssertNoErr(t, err) + actual, err := policies.ExtractPolicies(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, []policies.Policy{SecondPolicy}, actual) +} + +func TestListPoliciesFiltersCheck(t *testing.T) { + type test struct { + filterName string + wantErr bool + } + tests := []test{ + {"foo__contains", false}, + {"foo", true}, + {"foo_contains", true}, + {"foo__", true}, + {"__foo", true}, + } + + var listOpts policies.ListOpts + for _, _test := range tests { + listOpts.Filters = map[string]string{_test.filterName: "bar"} + _, err := listOpts.ToPolicyListQuery() + + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case policies.InvalidListFilter: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestCreatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreatePolicySuccessfully(t) + + createOpts := policies.CreateOpts{ + Type: "application/json", + Blob: []byte("{'bar_user': 'role:network-user'}"), + Extra: map[string]interface{}{ + "description": "policy for bar_user", + }, + } + + actual, err := policies.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondPolicy, *actual) +} + +func TestCreatePolicyTypeLengthCheck(t *testing.T) { + // strGenerator generates a string of fixed length filled with '0' + strGenerator := func(length int) string { + return 
fmt.Sprintf(fmt.Sprintf("%%0%dd", length), 0) + } + + type test struct { + length int + wantErr bool + } + + tests := []test{ + {100, false}, + {255, false}, + {256, true}, + {300, true}, + } + + createOpts := policies.CreateOpts{ + Blob: []byte("{'bar_user': 'role:network-user'}"), + } + + for _, _test := range tests { + createOpts.Type = strGenerator(_test.length) + if len(createOpts.Type) != _test.length { + t.Fatal("function strGenerator does not work properly") + } + + _, err := createOpts.ToPolicyCreateMap() + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case policies.StringFieldLengthExceedsLimit: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestGetPolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetPolicySuccessfully(t) + + id := "b49884da9d31494ea02aff38d4b4e701" + actual, err := policies.Get(client.ServiceClient(), id).Extract() + + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondPolicy, *actual) +} + +func TestUpdatePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdatePolicySuccessfully(t) + + updateOpts := policies.UpdateOpts{ + Extra: map[string]interface{}{ + "description": "updated policy for bar_user", + }, + } + + id := "b49884da9d31494ea02aff38d4b4e701" + actual, err := policies.Update(client.ServiceClient(), id, updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondPolicyUpdated, *actual) +} + +func TestUpdatePolicyTypeLengthCheck(t *testing.T) { + // strGenerator generates a string of fixed length filled with '0' + strGenerator := func(length int) string { + return fmt.Sprintf(fmt.Sprintf("%%0%dd", length), 0) + } + + type test struct { + length int + wantErr bool + } + + tests := []test{ + {100, false}, + {255, false}, + {256, true}, + {300, true}, + } + + var updateOpts policies.UpdateOpts + for _, _test := range tests { + updateOpts.Type = strGenerator(_test.length) + if len(updateOpts.Type) != _test.length { + t.Fatal("function strGenerator does not work properly") + } + + _, err := updateOpts.ToPolicyUpdateMap() + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case policies.StringFieldLengthExceedsLimit: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestDeletePolicy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeletePolicySuccessfully(t) + + res := policies.Delete(client.ServiceClient(), "9fe1d3") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/urls.go new file mode 100644 index 000000000000..df0d183b438d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/policies/urls.go @@ -0,0 +1,25 @@ +package policies + +import "github.com/gophercloud/gophercloud" + +const policyPath = "policies" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(policyPath) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(policyPath) +} + +func getURL(client *gophercloud.ServiceClient, policyID string) string { + return client.ServiceURL(policyPath, policyID) +} + +func updateURL(client *gophercloud.ServiceClient, policyID string) string { + return client.ServiceURL(policyPath, 
policyID) +} + +func deleteURL(client *gophercloud.ServiceClient, policyID string) string { + return client.ServiceURL(policyPath, policyID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go new file mode 100644 index 000000000000..4f5b45ab6551 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/doc.go @@ -0,0 +1,58 @@ +/* +Package projects manages and retrieves Projects in the OpenStack Identity +Service. + +Example to List Projects + + listOpts := projects.ListOpts{ + Enabled: gophercloud.Enabled, + } + + allPages, err := projects.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allProjects, err := projects.ExtractProjects(allPages) + if err != nil { + panic(err) + } + + for _, project := range allProjects { + fmt.Printf("%+v\n", project) + } + +Example to Create a Project + + createOpts := projects.CreateOpts{ + Name: "project_name", + Description: "Project Description" + } + + project, err := projects.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Project + + projectID := "966b3c7d36a24facaf20b7e458bf2192" + + updateOpts := projects.UpdateOpts{ + Enabled: gophercloud.Disabled, + } + + project, err := projects.Update(identityClient, projectID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Project + + projectID := "966b3c7d36a24facaf20b7e458bf2192" + err := projects.Delete(identityClient, projectID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package projects diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/errors.go new file mode 100644 index 000000000000..7be97d8594df --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/errors.go @@ -0,0 +1,17 @@ +package projects + +import "fmt" + +// InvalidListFilter is returned by the ToUserListQuery method when validation of +// a filter does not pass +type InvalidListFilter struct { + FilterName string +} + +func (e InvalidListFilter) Error() string { + s := fmt.Sprintf( + "Invalid filter name [%s]: it must be in format of NAME__COMPARATOR", + e.FilterName, + ) + return s +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go new file mode 100644 index 000000000000..30f3d1f9ea5a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/requests.go @@ -0,0 +1,174 @@ +package projects + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToProjectListQuery() (string, error) +} + +// ListOpts enables filtering of a list request. +type ListOpts struct { + // DomainID filters the response by a domain ID. + DomainID string `q:"domain_id"` + + // Enabled filters the response by enabled projects. + Enabled *bool `q:"enabled"` + + // IsDomain filters the response by projects that are domains. + // Setting this to true is effectively listing domains. + IsDomain *bool `q:"is_domain"` + + // Name filters the response by project name. 
+ Name string `q:"name"` + + // ParentID filters the response by projects of a given parent project. + ParentID string `q:"parent_id"` + + // Filters filters the response by custom filters such as + // 'name__contains=foo' + Filters map[string]string `q:"-"` +} + +// ToProjectListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToProjectListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the Projects to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToProjectListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ProjectPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single project, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToProjectCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents parameters used to create a project. +type CreateOpts struct { + // DomainID is the ID this project will belong under. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the project status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // IsDomain indicates if this project is a domain. + IsDomain *bool `json:"is_domain,omitempty"` + + // Name is the name of the project. + Name string `json:"name" required:"true"` + + // ParentID specifies the parent project of this new project. + ParentID string `json:"parent_id,omitempty"` + + // Description is the description of the project. + Description string `json:"description,omitempty"` +} + +// ToProjectCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToProjectCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "project") +} + +// Create creates a new Project. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToProjectCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, nil) + return +} + +// Delete deletes a project. +func Delete(client *gophercloud.ServiceClient, projectID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, projectID), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToProjectUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents parameters to update a project. +type UpdateOpts struct { + // DomainID is the ID this project will belong under. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the project status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // IsDomain indicates if this project is a domain. 
+ IsDomain *bool `json:"is_domain,omitempty"` + + // Name is the name of the project. + Name string `json:"name,omitempty"` + + // ParentID specifies the parent project of this new project. + ParentID string `json:"parent_id,omitempty"` + + // Description is the description of the project. + Description string `json:"description,omitempty"` +} + +// ToUpdateCreateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToProjectUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "project") +} + +// Update modifies the attributes of a project. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToProjectUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go new file mode 100644 index 000000000000..a13fa7f2ae1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/results.go @@ -0,0 +1,103 @@ +package projects + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type projectResult struct { + gophercloud.Result +} + +// GetResult is the result of a Get request. Call its Extract method to +// interpret it as a Project. +type GetResult struct { + projectResult +} + +// CreateResult is the result of a Create request. Call its Extract method to +// interpret it as a Project. +type CreateResult struct { + projectResult +} + +// DeleteResult is the result of a Delete request. Call its ExtractErr method to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the result of an Update request. Call its Extract method to +// interpret it as a Project. +type UpdateResult struct { + projectResult +} + +// Project represents an OpenStack Identity Project. +type Project struct { + // IsDomain indicates whether the project is a domain. + IsDomain bool `json:"is_domain"` + + // Description is the description of the project. + Description string `json:"description"` + + // DomainID is the domain ID the project belongs to. + DomainID string `json:"domain_id"` + + // Enabled is whether or not the project is enabled. + Enabled bool `json:"enabled"` + + // ID is the unique ID of the project. + ID string `json:"id"` + + // Name is the name of the project. + Name string `json:"name"` + + // ParentID is the parent_id of the project. + ParentID string `json:"parent_id"` +} + +// ProjectPage is a single page of Project results. +type ProjectPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Projects contains any results. +func (r ProjectPage) IsEmpty() (bool, error) { + projects, err := ExtractProjects(r) + return len(projects) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r ProjectPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractProjects returns a slice of Projects contained in a single page of +// results. 
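+//
+// A usage sketch: ExtractProjects is typically called from an EachPage
+// callback while iterating the Pager returned by List. Here identityClient
+// is assumed to be an authenticated Identity v3 *gophercloud.ServiceClient:
+//
+//	err := projects.List(identityClient, nil).EachPage(func(page pagination.Page) (bool, error) {
+//		projectList, err := projects.ExtractProjects(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, project := range projectList {
+//			fmt.Printf("%+v\n", project)
+//		}
+//		return true, nil
+//	})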
+func ExtractProjects(r pagination.Page) ([]Project, error) { + var s struct { + Projects []Project `json:"projects"` + } + err := (r.(ProjectPage)).ExtractInto(&s) + return s.Projects, err +} + +// Extract interprets any projectResults as a Project. +func (r projectResult) Extract() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/testing/fixtures.go new file mode 100644 index 000000000000..caa55679a81d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/testing/fixtures.go @@ -0,0 +1,192 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Project results. +const ListOutput = ` +{ + "projects": [ + { + "is_domain": false, + "description": "The team that is red", + "domain_id": "default", + "enabled": true, + "id": "1234", + "name": "Red Team", + "parent_id": null + }, + { + "is_domain": false, + "description": "The team that is blue", + "domain_id": "default", + "enabled": true, + "id": "9876", + "name": "Blue Team", + "parent_id": null + } + ], + "links": { + "next": null, + "previous": null + } +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "project": { + "is_domain": false, + "description": "The team that is red", + "domain_id": "default", + "enabled": true, + "id": "1234", + "name": "Red Team", + "parent_id": null + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "project": { + "description": "The team that is red", + "name": "Red Team" + } +} +` + +// UpdateRequest provides the input to an Update request. +const UpdateRequest = ` +{ + "project": { + "description": "The team that is bright red", + "name": "Bright Red Team" + } +} +` + +// UpdateOutput provides an Update response. +const UpdateOutput = ` +{ + "project": { + "is_domain": false, + "description": "The team that is bright red", + "domain_id": "default", + "enabled": true, + "id": "1234", + "name": "Bright Red Team", + "parent_id": null + } +} +` + +// RedTeam is a Project fixture. +var RedTeam = projects.Project{ + IsDomain: false, + Description: "The team that is red", + DomainID: "default", + Enabled: true, + ID: "1234", + Name: "Red Team", + ParentID: "", +} + +// BlueTeam is a Project fixture. +var BlueTeam = projects.Project{ + IsDomain: false, + Description: "The team that is blue", + DomainID: "default", + Enabled: true, + ID: "9876", + Name: "Blue Team", + ParentID: "", +} + +// UpdatedRedTeam is a Project Fixture. +var UpdatedRedTeam = projects.Project{ + IsDomain: false, + Description: "The team that is bright red", + DomainID: "default", + Enabled: true, + ID: "1234", + Name: "Bright Red Team", + ParentID: "", +} + +// ExpectedProjectSlice is the slice of projects expected to be returned from ListOutput. +var ExpectedProjectSlice = []projects.Project{RedTeam, BlueTeam} + +// HandleListProjectsSuccessfully creates an HTTP handler at `/projects` on the +// test handler mux that responds with a list of two tenants. 
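+//
+// The handler is registered on th.Mux, so callers are expected to run
+// th.SetupHTTP first (and defer th.TeardownHTTP), as the tests in
+// requests_test.go do.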
+func HandleListProjectsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetProjectSuccessfully creates an HTTP handler at `/projects` on the +// test handler mux that responds with a single project. +func HandleGetProjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects/1234", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateProjectSuccessfully creates an HTTP handler at `/projects` on the +// test handler mux that tests project creation. +func HandleCreateProjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleDeleteProjectSuccessfully creates an HTTP handler at `/projects` on the +// test handler mux that tests project deletion. +func HandleDeleteProjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects/1234", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateProjectSuccessfully creates an HTTP handler at `/projects` on the +// test handler mux that tests project updates. 
+func HandleUpdateProjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects/1234", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/testing/requests_test.go new file mode 100644 index 000000000000..9a6664889f89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/testing/requests_test.go @@ -0,0 +1,111 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListProjects(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListProjectsSuccessfully(t) + + count := 0 + err := projects.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := projects.ExtractProjects(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedProjectSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListGroupsFiltersCheck(t *testing.T) { + type test struct { + filterName string + wantErr bool + } + tests := []test{ + {"foo__contains", false}, + {"foo", true}, + {"foo_contains", true}, + {"foo__", true}, + {"__foo", true}, + } + + var listOpts projects.ListOpts + for _, _test := range tests { + listOpts.Filters = map[string]string{_test.filterName: "bar"} + _, err := listOpts.ToProjectListQuery() + + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case projects.InvalidListFilter: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestGetProject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetProjectSuccessfully(t) + + actual, err := projects.Get(client.ServiceClient(), "1234").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, RedTeam, *actual) +} + +func TestCreateProject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateProjectSuccessfully(t) + + createOpts := projects.CreateOpts{ + Name: "Red Team", + Description: "The team that is red", + } + + actual, err := projects.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, RedTeam, *actual) +} + +func TestDeleteProject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteProjectSuccessfully(t) + + res := projects.Delete(client.ServiceClient(), "1234") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateProject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateProjectSuccessfully(t) + + updateOpts := projects.UpdateOpts{ + Name: "Bright Red Team", + Description: "The team that is bright red", + } + + actual, err := projects.Update(client.ServiceClient(), "1234", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, UpdatedRedTeam, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go new file mode 100644 index 000000000000..e26cf3684ccb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/projects/urls.go @@ -0,0 +1,23 @@ +package projects + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("projects") +} + +func getURL(client *gophercloud.ServiceClient, projectID string) string { + return client.ServiceURL("projects", projectID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("projects") +} + +func deleteURL(client *gophercloud.ServiceClient, projectID string) string { + return client.ServiceURL("projects", projectID) +} + +func updateURL(client *gophercloud.ServiceClient, projectID string) string { + return client.ServiceURL("projects", projectID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/doc.go new file mode 100644 index 000000000000..a37b05a544a7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/doc.go @@ -0,0 +1,63 @@ +/* +Package regions manages and retrieves Regions in the OpenStack Identity Service. + +Example to List Regions + + listOpts := regions.ListOpts{ + ParentRegionID: "RegionOne", + } + + allPages, err := regions.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRegions, err := regions.ExtractRegions(allPages) + if err != nil { + panic(err) + } + + for _, region := range allRegions { + fmt.Printf("%+v\n", region) + } + +Example to Create a Region + + createOpts := regions.CreateOpts{ + ID: "TestRegion", + Description: "Region for testing" + Extra: map[string]interface{}{ + "email": "testregionsupport@example.com", + } + } + + region, err := regions.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Region + + regionID := "TestRegion" + + // There is currently a bug in Keystone where updating the optional Extras + // attributes set in regions.Create is not supported, see: + // https://bugs.launchpad.net/keystone/+bug/1729933 + updateOpts := regions.UpdateOpts{ + Description: "Updated Description for region", + } + + region, err := regions.Update(identityClient, regionID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Region + + regionID := "TestRegion" + err := regions.Delete(identityClient, regionID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package regions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/requests.go new file mode 100644 index 000000000000..aa588fdff4a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/requests.go @@ -0,0 +1,164 @@ +package regions + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToRegionListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // ParentRegionID filters the response by a parent region ID. 
+ ParentRegionID string `q:"parent_region_id"` +} + +// ToRegionListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRegionListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates the Regions to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToRegionListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return RegionPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single region, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToRegionCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a region. +type CreateOpts struct { + // ID is the ID of the new region. + ID string `json:"id,omitempty"` + + // Description is a description of the region. + Description string `json:"description,omitempty"` + + // ParentRegionID is the ID of the parent the region to add this region under. + ParentRegionID string `json:"parent_region_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the region. + Extra map[string]interface{} `json:"-"` +} + +// ToRegionCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToRegionCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "region") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["region"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Region. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRegionCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToRegionUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a region. +type UpdateOpts struct { + // Description is a description of the region. + Description string `json:"description,omitempty"` + + // ParentRegionID is the ID of the parent region. + ParentRegionID string `json:"parent_region_id,omitempty"` + + /* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // The following lines should be uncommented once the fix is merged. + + // Extra is free-form extra key/value pairs to describe the region. + Extra map[string]interface{} `json:"-"` + */ +} + +// ToRegionUpdateMap formats a UpdateOpts into an update request. 
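+//
+// As a sketch of the resulting body, UpdateOpts{Description: "foo"} is
+// rendered under a "region" key:
+//
+//	{"region": {"description": "foo"}}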
+func (opts UpdateOpts) ToRegionUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "region") + if err != nil { + return nil, err + } + + /* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // The following lines should be uncommented once the fix is merged. + + if opts.Extra != nil { + if v, ok := b["region"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + */ + + return b, nil +} + +// Update updates an existing Region. +func Update(client *gophercloud.ServiceClient, regionID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRegionUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, regionID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a region. +func Delete(client *gophercloud.ServiceClient, regionID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, regionID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/results.go new file mode 100644 index 000000000000..a60cb3488338 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/results.go @@ -0,0 +1,129 @@ +package regions + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Region helps manage related users. +type Region struct { + // Description describes the region purpose. + Description string `json:"description"` + + // ID is the unique ID of the region. + ID string `json:"id"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` + + // Links contains referencing links to the region. + Links map[string]interface{} `json:"links"` + + // ParentRegionID is the ID of the parent region. + ParentRegionID string `json:"parent_region_id"` +} + +func (r *Region) UnmarshalJSON(b []byte) error { + type tmp Region + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Region(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Region{}, resultMap) + } + } + + return err +} + +type regionResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Region. +type GetResult struct { + regionResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Region. +type CreateResult struct { + regionResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Region. +type UpdateResult struct { + regionResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. 
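+//
+// A minimal usage sketch, assuming identityClient is an authenticated
+// Identity v3 *gophercloud.ServiceClient:
+//
+//	err := regions.Delete(identityClient, "RegionOne-West").ExtractErr()
+//	if err != nil {
+//		// handle the error
+//	}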
+type DeleteResult struct { + gophercloud.ErrResult +} + +// RegionPage is a single page of Region results. +type RegionPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Regions contains any results. +func (r RegionPage) IsEmpty() (bool, error) { + regions, err := ExtractRegions(r) + return len(regions) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r RegionPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractRegions returns a slice of Regions contained in a single page of results. +func ExtractRegions(r pagination.Page) ([]Region, error) { + var s struct { + Regions []Region `json:"regions"` + } + err := (r.(RegionPage)).ExtractInto(&s) + return s.Regions, err +} + +// Extract interprets any region results as a Region. +func (r regionResult) Extract() (*Region, error) { + var s struct { + Region *Region `json:"region"` + } + err := r.ExtractInto(&s) + return s.Region, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/testing/fixtures.go new file mode 100644 index 000000000000..dee57c52af28 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/testing/fixtures.go @@ -0,0 +1,228 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/regions" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Region results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/regions" + }, + "regions": [ + { + "id": "RegionOne-East", + "description": "East sub-region of RegionOne", + "links": { + "self": "http://example.com/identity/v3/regions/RegionOne-East" + }, + "parent_region_id": "RegionOne" + }, + { + "id": "RegionOne-West", + "description": "West sub-region of RegionOne", + "links": { + "self": "https://example.com/identity/v3/regions/RegionOne-West" + }, + "extra": { + "email": "westsupport@example.com" + }, + "parent_region_id": "RegionOne" + } + ] +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "region": { + "id": "RegionOne-West", + "description": "West sub-region of RegionOne", + "links": { + "self": "https://example.com/identity/v3/regions/RegionOne-West" + }, + "name": "support", + "extra": { + "email": "westsupport@example.com" + }, + "parent_region_id": "RegionOne" + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "region": { + "id": "RegionOne-West", + "description": "West sub-region of RegionOne", + "email": "westsupport@example.com", + "parent_region_id": "RegionOne" + } +} +` + +/* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // The following line should be added to region in UpdateRequest once the + // fix is merged. + + "email": "1stwestsupport@example.com" +*/ +// UpdateRequest provides the input to as Update request. 
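+//
+// HandleUpdateRegionSuccessfully asserts that incoming PATCH bodies match
+// this JSON body (via th.TestJSONRequest).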
+const UpdateRequest = ` +{ + "region": { + "description": "First West sub-region of RegionOne" + } +} +` + +/* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // This following line should replace the email in UpdateOutput.extra once + // the fix is merged. + + "email": "1stwestsupport@example.com" +*/ +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "region": { + "id": "RegionOne-West", + "links": { + "self": "https://example.com/identity/v3/regions/RegionOne-West" + }, + "description": "First West sub-region of RegionOne", + "extra": { + "email": "westsupport@example.com" + }, + "parent_region_id": "RegionOne" + } +} +` + +// FirstRegion is the first region in the List request. +var FirstRegion = regions.Region{ + ID: "RegionOne-East", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/regions/RegionOne-East", + }, + Description: "East sub-region of RegionOne", + Extra: map[string]interface{}{}, + ParentRegionID: "RegionOne", +} + +// SecondRegion is the second region in the List request. +var SecondRegion = regions.Region{ + ID: "RegionOne-West", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/regions/RegionOne-West", + }, + Description: "West sub-region of RegionOne", + Extra: map[string]interface{}{ + "email": "westsupport@example.com", + }, + ParentRegionID: "RegionOne", +} + +/* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // This should replace the email in SecondRegionUpdated.Extra once the fix + // is merged. + + "email": "1stwestsupport@example.com" +*/ +// SecondRegionUpdated is the second region in the List request. +var SecondRegionUpdated = regions.Region{ + ID: "RegionOne-West", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/regions/RegionOne-West", + }, + Description: "First West sub-region of RegionOne", + Extra: map[string]interface{}{ + "email": "westsupport@example.com", + }, + ParentRegionID: "RegionOne", +} + +// ExpectedRegionsSlice is the slice of regions expected to be returned from ListOutput. +var ExpectedRegionsSlice = []regions.Region{FirstRegion, SecondRegion} + +// HandleListRegionsSuccessfully creates an HTTP handler at `/regions` on the +// test handler mux that responds with a list of two regions. +func HandleListRegionsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/regions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetRegionSuccessfully creates an HTTP handler at `/regions` on the +// test handler mux that responds with a single region. +func HandleGetRegionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/regions/RegionOne-West", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateRegionSuccessfully creates an HTTP handler at `/regions` on the +// test handler mux that tests region creation. 
+func HandleCreateRegionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/regions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleUpdateRegionSuccessfully creates an HTTP handler at `/regions` on the +// test handler mux that tests region update. +func HandleUpdateRegionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/regions/RegionOne-West", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }) +} + +// HandleDeleteRegionSuccessfully creates an HTTP handler at `/regions` on the +// test handler mux that tests region deletion. +func HandleDeleteRegionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/regions/RegionOne-West", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/testing/requests_test.go new file mode 100644 index 000000000000..7f5755704863 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/testing/requests_test.go @@ -0,0 +1,105 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/regions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListRegions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListRegionsSuccessfully(t) + + count := 0 + err := regions.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := regions.ExtractRegions(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedRegionsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListRegionsAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListRegionsSuccessfully(t) + + allPages, err := regions.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := regions.ExtractRegions(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRegionsSlice, actual) + th.AssertEquals(t, ExpectedRegionsSlice[1].Extra["email"], "westsupport@example.com") +} + +func TestGetRegion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetRegionSuccessfully(t) + + actual, err := regions.Get(client.ServiceClient(), "RegionOne-West").Extract() + + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondRegion, *actual) +} + +func TestCreateRegion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateRegionSuccessfully(t) + + createOpts := regions.CreateOpts{ + ID: "RegionOne-West", + Description: "West sub-region of RegionOne", + Extra: map[string]interface{}{ + "email": "westsupport@example.com", + }, + ParentRegionID: "RegionOne", + } + + actual, err := regions.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondRegion, 
*actual) +} + +func TestUpdateRegion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateRegionSuccessfully(t) + + updateOpts := regions.UpdateOpts{ + Description: "First West sub-region of RegionOne", + /* + // Due to a bug in Keystone, the Extra column of the Region table + // is not updatable, see: https://bugs.launchpad.net/keystone/+bug/1729933 + // The following lines should be uncommented once the fix is merged. + + Extra: map[string]interface{}{ + "email": "1stwestsupport@example.com", + }, + */ + } + + actual, err := regions.Update(client.ServiceClient(), "RegionOne-West", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondRegionUpdated, *actual) +} + +func TestDeleteRegion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteRegionSuccessfully(t) + + res := regions.Delete(client.ServiceClient(), "RegionOne-West") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/urls.go new file mode 100644 index 000000000000..150ecc835824 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/regions/urls.go @@ -0,0 +1,23 @@ +package regions + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("regions") +} + +func getURL(client *gophercloud.ServiceClient, regionID string) string { + return client.ServiceURL("regions", regionID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("regions") +} + +func updateURL(client *gophercloud.ServiceClient, regionID string) string { + return client.ServiceURL("regions", regionID) +} + +func deleteURL(client *gophercloud.ServiceClient, regionID string) string { + return client.ServiceURL("regions", regionID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/doc.go new file mode 100644 index 000000000000..f0e4d045e961 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/doc.go @@ -0,0 +1,135 @@ +/* +Package roles provides information and interaction with the roles API +resource for the OpenStack Identity service. 
+ +Example to List Roles + + listOpts := roles.ListOpts{ + DomainID: "default", + } + + allPages, err := roles.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRoles, err := roles.ExtractRoles(allPages) + if err != nil { + panic(err) + } + + for _, role := range allRoles { + fmt.Printf("%+v\n", role) + } + +Example to Create a Role + + createOpts := roles.CreateOpts{ + Name: "read-only-admin", + DomainID: "default", + Extra: map[string]interface{}{ + "description": "this role grants read-only privilege cross tenant", + } + } + + role, err := roles.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Role + + roleID := "0fe36e73809d46aeae6705c39077b1b3" + + updateOpts := roles.UpdateOpts{ + Name: "read only admin", + } + + role, err := roles.Update(identityClient, roleID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Role + + roleID := "0fe36e73809d46aeae6705c39077b1b3" + err := roles.Delete(identityClient, roleID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List Role Assignments + + listOpts := roles.ListAssignmentsOpts{ + UserID: "97061de2ed0647b28a393c36ab584f39", + ScopeProjectID: "9df1a02f5eb2416a9781e8b0c022d3ae", + } + + allPages, err := roles.ListAssignments(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRoles, err := roles.ExtractRoleAssignments(allPages) + if err != nil { + panic(err) + } + + for _, role := range allRoles { + fmt.Printf("%+v\n", role) + } + +Example to List Role Assignments for a User on a Project + + projectID := "a99e9b4e620e4db09a2dfb6e42a01e66" + userID := "9df1a02f5eb2416a9781e8b0c022d3ae" + listAssignmentsOnResourceOpts := roles.ListAssignmentsOnResourceOpts{ + UserID: userID, + ProjectID: projectID, + } + + allPages, err := roles.ListAssignmentsOnResource(identityClient, listAssignmentsOnResourceOpts).AllPages() + if err != nil { + panic(err) + } + + allRoles, err := roles.ExtractRoles(allPages) + if err != nil { + panic(err) + } + + for _, role := range allRoles { + fmt.Printf("%+v\n", role) + } + +Example to Assign a Role to a User in a Project + + projectID := "a99e9b4e620e4db09a2dfb6e42a01e66" + userID := "9df1a02f5eb2416a9781e8b0c022d3ae" + roleID := "9fe2ff9ee4384b1894a90878d3e92bab" + + err := roles.Assign(identityClient, roleID, roles.AssignOpts{ + UserID: userID, + ProjectID: projectID, + }).ExtractErr() + + if err != nil { + panic(err) + } + +Example to Unassign a Role From a User in a Project + + projectID := "a99e9b4e620e4db09a2dfb6e42a01e66" + userID := "9df1a02f5eb2416a9781e8b0c022d3ae" + roleID := "9fe2ff9ee4384b1894a90878d3e92bab" + + err := roles.Unassign(identityClient, roleID, roles.UnassignOpts{ + UserID: userID, + ProjectID: projectID, + }).ExtractErr() + + if err != nil { + panic(err) + } +*/ +package roles diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/errors.go new file mode 100644 index 000000000000..b60d7d18b611 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/errors.go @@ -0,0 +1,17 @@ +package roles + +import "fmt" + +// InvalidListFilter is returned by the ToUserListQuery method when validation of +// a filter does not pass +type InvalidListFilter struct { + FilterName string +} + +func (e InvalidListFilter) Error() string { + s := fmt.Sprintf( + "Invalid filter name [%s]: it must be in format of 
NAME__COMPARATOR", + e.FilterName, + ) + return s +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/requests.go new file mode 100644 index 000000000000..573db9ea201a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/requests.go @@ -0,0 +1,392 @@ +package roles + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToRoleListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // DomainID filters the response by a domain ID. + DomainID string `q:"domain_id"` + + // Name filters the response by role name. + Name string `q:"name"` + + // Filters filters the response by custom filters such as + // 'name__contains=foo' + Filters map[string]string `q:"-"` +} + +// ToRoleListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRoleListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the roles to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToRoleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single role, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToRoleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a role. +type CreateOpts struct { + // Name is the name of the new role. + Name string `json:"name" required:"true"` + + // DomainID is the ID of the domain the role belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Extra is free-form extra key/value pairs to describe the role. + Extra map[string]interface{} `json:"-"` +} + +// ToRoleCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToRoleCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "role") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["role"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new Role. 
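+//
+// A usage sketch, assuming identityClient is an authenticated Identity v3
+// *gophercloud.ServiceClient (the IDs and values shown are illustrative):
+//
+//	createOpts := roles.CreateOpts{
+//		Name:     "support",
+//		DomainID: "1789d1",
+//		Extra: map[string]interface{}{
+//			"description": "read-only support role",
+//		},
+//	}
+//	role, err := roles.Create(identityClient, createOpts).Extract()
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = role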
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRoleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToRoleUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a role. +type UpdateOpts struct { + // Name is the name of the new role. + Name string `json:"name,omitempty"` + + // Extra is free-form extra key/value pairs to describe the role. + Extra map[string]interface{} `json:"-"` +} + +// ToRoleUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToRoleUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "role") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["role"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Role. +func Update(client *gophercloud.ServiceClient, roleID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRoleUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, roleID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete deletes a role. +func Delete(client *gophercloud.ServiceClient, roleID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, roleID), nil) + return +} + +// ListAssignmentsOptsBuilder allows extensions to add additional parameters to +// the ListAssignments request. +type ListAssignmentsOptsBuilder interface { + ToRolesListAssignmentsQuery() (string, error) +} + +// ListAssignmentsOpts allows you to query the ListAssignments method. +// Specify one of or a combination of GroupId, RoleId, ScopeDomainId, +// ScopeProjectId, and/or UserId to search for roles assigned to corresponding +// entities. +type ListAssignmentsOpts struct { + // GroupID is the group ID to query. + GroupID string `q:"group.id"` + + // RoleID is the specific role to query assignments to. + RoleID string `q:"role.id"` + + // ScopeDomainID filters the results by the given domain ID. + ScopeDomainID string `q:"scope.domain.id"` + + // ScopeProjectID filters the results by the given Project ID. + ScopeProjectID string `q:"scope.project.id"` + + // UserID filterst he results by the given User ID. + UserID string `q:"user.id"` + + // Effective lists effective assignments at the user, project, and domain + // level, allowing for the effects of group membership. + Effective *bool `q:"effective"` +} + +// ToRolesListAssignmentsQuery formats a ListAssignmentsOpts into a query string. +func (opts ListAssignmentsOpts) ToRolesListAssignmentsQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListAssignments enumerates the roles assigned to a specified resource. 
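+//
+// A usage sketch, assuming identityClient is an authenticated Identity v3
+// *gophercloud.ServiceClient and the IDs are illustrative:
+//
+//	listOpts := roles.ListAssignmentsOpts{
+//		UserID:         "97061de2ed0647b28a393c36ab584f39",
+//		ScopeProjectID: "9df1a02f5eb2416a9781e8b0c022d3ae",
+//	}
+//	allPages, err := roles.ListAssignments(identityClient, listOpts).AllPages()
+//	if err != nil {
+//		// handle the error
+//	}
+//	assignments, err := roles.ExtractRoleAssignments(allPages)
+//	if err != nil {
+//		// handle the error
+//	}
+//	_ = assignments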
+func ListAssignments(client *gophercloud.ServiceClient, opts ListAssignmentsOptsBuilder) pagination.Pager { + url := listAssignmentsURL(client) + if opts != nil { + query, err := opts.ToRolesListAssignmentsQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return RoleAssignmentPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// ListAssignmentsOnResourceOpts provides options to list role assignments +// for a user/group on a project/domain +type ListAssignmentsOnResourceOpts struct { + // UserID is the ID of a user to assign a role + // Note: exactly one of UserID or GroupID must be provided + UserID string `xor:"GroupID"` + + // GroupID is the ID of a group to assign a role + // Note: exactly one of UserID or GroupID must be provided + GroupID string `xor:"UserID"` + + // ProjectID is the ID of a project to assign a role on + // Note: exactly one of ProjectID or DomainID must be provided + ProjectID string `xor:"DomainID"` + + // DomainID is the ID of a domain to assign a role on + // Note: exactly one of ProjectID or DomainID must be provided + DomainID string `xor:"ProjectID"` +} + +// AssignOpts provides options to assign a role +type AssignOpts struct { + // UserID is the ID of a user to assign a role + // Note: exactly one of UserID or GroupID must be provided + UserID string `xor:"GroupID"` + + // GroupID is the ID of a group to assign a role + // Note: exactly one of UserID or GroupID must be provided + GroupID string `xor:"UserID"` + + // ProjectID is the ID of a project to assign a role on + // Note: exactly one of ProjectID or DomainID must be provided + ProjectID string `xor:"DomainID"` + + // DomainID is the ID of a domain to assign a role on + // Note: exactly one of ProjectID or DomainID must be provided + DomainID string `xor:"ProjectID"` +} + +// UnassignOpts provides options to unassign a role +type UnassignOpts struct { + // UserID is the ID of a user to unassign a role + // Note: exactly one of UserID or GroupID must be provided + UserID string `xor:"GroupID"` + + // GroupID is the ID of a group to unassign a role + // Note: exactly one of UserID or GroupID must be provided + GroupID string `xor:"UserID"` + + // ProjectID is the ID of a project to unassign a role on + // Note: exactly one of ProjectID or DomainID must be provided + ProjectID string `xor:"DomainID"` + + // DomainID is the ID of a domain to unassign a role on + // Note: exactly one of ProjectID or DomainID must be provided + DomainID string `xor:"ProjectID"` +} + +// ListAssignmentsOnResource is the operation responsible for listing role +// assignments for a user/group on a project/domain. 
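+//
+// Exactly one of UserID/GroupID and exactly one of ProjectID/DomainID must be
+// set in opts; the "xor" struct tags are checked via
+// gophercloud.BuildRequestBody before the request URL is built.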
+func ListAssignmentsOnResource(client *gophercloud.ServiceClient, opts ListAssignmentsOnResourceOpts) pagination.Pager { + // Check xor conditions + _, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return pagination.Pager{Err: err} + } + + // Get corresponding URL + var targetID string + var targetType string + if opts.ProjectID != "" { + targetID = opts.ProjectID + targetType = "projects" + } else { + targetID = opts.DomainID + targetType = "domains" + } + + var actorID string + var actorType string + if opts.UserID != "" { + actorID = opts.UserID + actorType = "users" + } else { + actorID = opts.GroupID + actorType = "groups" + } + + url := listAssignmentsOnResourceURL(client, targetType, targetID, actorType, actorID) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return RolePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Assign is the operation responsible for assigning a role +// to a user/group on a project/domain. +func Assign(client *gophercloud.ServiceClient, roleID string, opts AssignOpts) (r AssignmentResult) { + // Check xor conditions + _, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + r.Err = err + return + } + + // Get corresponding URL + var targetID string + var targetType string + if opts.ProjectID != "" { + targetID = opts.ProjectID + targetType = "projects" + } else { + targetID = opts.DomainID + targetType = "domains" + } + + var actorID string + var actorType string + if opts.UserID != "" { + actorID = opts.UserID + actorType = "users" + } else { + actorID = opts.GroupID + actorType = "groups" + } + + _, r.Err = client.Put(assignURL(client, targetType, targetID, actorType, actorID, roleID), nil, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// Unassign is the operation responsible for unassigning a role +// from a user/group on a project/domain. +func Unassign(client *gophercloud.ServiceClient, roleID string, opts UnassignOpts) (r UnassignmentResult) { + // Check xor conditions + _, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + r.Err = err + return + } + + // Get corresponding URL + var targetID string + var targetType string + if opts.ProjectID != "" { + targetID = opts.ProjectID + targetType = "projects" + } else { + targetID = opts.DomainID + targetType = "domains" + } + + var actorID string + var actorType string + if opts.UserID != "" { + actorID = opts.UserID + actorType = "users" + } else { + actorID = opts.GroupID + actorType = "groups" + } + + _, r.Err = client.Delete(assignURL(client, targetType, targetID, actorType, actorID, roleID), &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/results.go new file mode 100644 index 000000000000..af8fd9e6a7a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/results.go @@ -0,0 +1,214 @@ +package roles + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// Role grants permissions to a user. +type Role struct { + // DomainID is the domain ID the role belongs to. + DomainID string `json:"domain_id"` + + // ID is the unique ID of the role. + ID string `json:"id"` + + // Links contains referencing links to the role. 
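+	// In the responses exercised by the package tests this is a single
+	// "self" URL.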
+ Links map[string]interface{} `json:"links"` + + // Name is the role name + Name string `json:"name"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` +} + +func (r *Role) UnmarshalJSON(b []byte) error { + type tmp Role + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Role(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Role{}, resultMap) + } + } + + return err +} + +type roleResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a Role. +type GetResult struct { + roleResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a Role +type CreateResult struct { + roleResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a Role. +type UpdateResult struct { + roleResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// RolePage is a single page of Role results. +type RolePage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Roles contains any results. +func (r RolePage) IsEmpty() (bool, error) { + roles, err := ExtractRoles(r) + return len(roles) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r RolePage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractProjects returns a slice of Roles contained in a single page of +// results. +func ExtractRoles(r pagination.Page) ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := (r.(RolePage)).ExtractInto(&s) + return s.Roles, err +} + +// Extract interprets any roleResults as a Role. +func (r roleResult) Extract() (*Role, error) { + var s struct { + Role *Role `json:"role"` + } + err := r.ExtractInto(&s) + return s.Role, err +} + +// RoleAssignment is the result of a role assignments query. +type RoleAssignment struct { + Role AssignedRole `json:"role,omitempty"` + Scope Scope `json:"scope,omitempty"` + User User `json:"user,omitempty"` + Group Group `json:"group,omitempty"` +} + +// AssignedRole represents a Role in an assignment. +type AssignedRole struct { + ID string `json:"id,omitempty"` +} + +// Scope represents a scope in a Role assignment. +type Scope struct { + Domain Domain `json:"domain,omitempty"` + Project Project `json:"project,omitempty"` +} + +// Domain represents a domain in a role assignment scope. +type Domain struct { + ID string `json:"id,omitempty"` +} + +// Project represents a project in a role assignment scope. +type Project struct { + ID string `json:"id,omitempty"` +} + +// User represents a user in a role assignment scope. 
+type User struct { + ID string `json:"id,omitempty"` +} + +// Group represents a group in a role assignment scope. +type Group struct { + ID string `json:"id,omitempty"` +} + +// RoleAssignmentPage is a single page of RoleAssignments results. +type RoleAssignmentPage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if the RoleAssignmentPage contains no results. +func (r RoleAssignmentPage) IsEmpty() (bool, error) { + roleAssignments, err := ExtractRoleAssignments(r) + return len(roleAssignments) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to +// the next page of results. +func (r RoleAssignmentPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + } `json:"links"` + } + err := r.ExtractInto(&s) + return s.Links.Next, err +} + +// ExtractRoleAssignments extracts a slice of RoleAssignments from a Collection +// acquired from List. +func ExtractRoleAssignments(r pagination.Page) ([]RoleAssignment, error) { + var s struct { + RoleAssignments []RoleAssignment `json:"role_assignments"` + } + err := (r.(RoleAssignmentPage)).ExtractInto(&s) + return s.RoleAssignments, err +} + +// AssignmentResult represents the result of an assign operation. +// Call ExtractErr method to determine if the request succeeded or failed. +type AssignmentResult struct { + gophercloud.ErrResult +} + +// UnassignmentResult represents the result of an unassign operation. +// Call ExtractErr method to determine if the request succeeded or failed. +type UnassignmentResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/doc.go new file mode 100644 index 000000000000..f4c5dab5b7da --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/doc.go @@ -0,0 +1,2 @@ +// roles unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/fixtures.go new file mode 100644 index 000000000000..9bcc2c7d07e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/fixtures.go @@ -0,0 +1,428 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/roles" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Role results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/roles" + }, + "roles": [ + { + "domain_id": "default", + "id": "2844b2a08be147a08ef58317d6471f1f", + "links": { + "self": "http://example.com/identity/v3/roles/2844b2a08be147a08ef58317d6471f1f" + }, + "name": "admin-read-only" + }, + { + "domain_id": "1789d1", + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/roles/9fe1d3" + }, + "name": "support", + "extra": { + "description": "read-only support role" + } + } + ] +} +` + +// GetOutput provides a Get result. 
+const GetOutput = ` +{ + "role": { + "domain_id": "1789d1", + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/roles/9fe1d3" + }, + "name": "support", + "extra": { + "description": "read-only support role" + } + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "role": { + "domain_id": "1789d1", + "name": "support", + "description": "read-only support role" + } +} +` + +// UpdateRequest provides the input to as Update request. +const UpdateRequest = ` +{ + "role": { + "description": "admin read-only support role" + } +} +` + +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "role": { + "domain_id": "1789d1", + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/roles/9fe1d3" + }, + "name": "support", + "extra": { + "description": "admin read-only support role" + } + } +} +` + +const ListAssignmentOutput = ` +{ + "role_assignments": [ + { + "links": { + "assignment": "http://identity:35357/v3/domains/161718/users/313233/roles/123456" + }, + "role": { + "id": "123456" + }, + "scope": { + "domain": { + "id": "161718" + } + }, + "user": { + "id": "313233" + } + }, + { + "links": { + "assignment": "http://identity:35357/v3/projects/456789/groups/101112/roles/123456", + "membership": "http://identity:35357/v3/groups/101112/users/313233" + }, + "role": { + "id": "123456" + }, + "scope": { + "project": { + "id": "456789" + } + }, + "user": { + "id": "313233" + } + } + ], + "links": { + "self": "http://identity:35357/v3/role_assignments?effective", + "previous": null, + "next": null + } +} +` + +// ListAssignmentsOnResourceOutput provides a result of ListAssignmentsOnResource request. +const ListAssignmentsOnResourceOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/projects/9e5a15/users/b964a9/roles" + }, + "roles": [ + { + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/roles/9fe1d3" + }, + "name": "support", + "extra": { + "description": "read-only support role" + } + } + ] +} +` + +// FirstRole is the first role in the List request. +var FirstRole = roles.Role{ + DomainID: "default", + ID: "2844b2a08be147a08ef58317d6471f1f", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/roles/2844b2a08be147a08ef58317d6471f1f", + }, + Name: "admin-read-only", + Extra: map[string]interface{}{}, +} + +// SecondRole is the second role in the List request. +var SecondRole = roles.Role{ + DomainID: "1789d1", + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/roles/9fe1d3", + }, + Name: "support", + Extra: map[string]interface{}{ + "description": "read-only support role", + }, +} + +// SecondRoleUpdated is how SecondRole should look after an Update. +var SecondRoleUpdated = roles.Role{ + DomainID: "1789d1", + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/roles/9fe1d3", + }, + Name: "support", + Extra: map[string]interface{}{ + "description": "admin read-only support role", + }, +} + +// ExpectedRolesSlice is the slice of roles expected to be returned from ListOutput. +var ExpectedRolesSlice = []roles.Role{FirstRole, SecondRole} + +// HandleListRolesSuccessfully creates an HTTP handler at `/roles` on the +// test handler mux that responds with a list of two roles. 
+func HandleListRolesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetRoleSuccessfully creates an HTTP handler at `/roles` on the +// test handler mux that responds with a single role. +func HandleGetRoleSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/roles/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateRoleSuccessfully creates an HTTP handler at `/roles` on the +// test handler mux that tests role creation. +func HandleCreateRoleSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/roles", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleUpdateRoleSuccessfully creates an HTTP handler at `/roles` on the +// test handler mux that tests role update. +func HandleUpdateRoleSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/roles/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }) +} + +// HandleDeleteRoleSuccessfully creates an HTTP handler at `/roles` on the +// test handler mux that tests role deletion. 
+func HandleDeleteRoleSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/roles/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +func HandleAssignSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects/{project_id}/users/{user_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + th.Mux.HandleFunc("/projects/{project_id}/groups/{group_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + th.Mux.HandleFunc("/domains/{domain_id}/users/{user_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + th.Mux.HandleFunc("/domains/{domain_id}/groups/{group_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +func HandleUnassignSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/projects/{project_id}/users/{user_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + th.Mux.HandleFunc("/projects/{project_id}/groups/{group_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + th.Mux.HandleFunc("/domains/{domain_id}/users/{user_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + th.Mux.HandleFunc("/domains/{domain_id}/groups/{group_id}/roles/{role_id}", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +// FirstRoleAssignment is the first role assignment in the List request. +var FirstRoleAssignment = roles.RoleAssignment{ + Role: roles.AssignedRole{ID: "123456"}, + Scope: roles.Scope{Domain: roles.Domain{ID: "161718"}}, + User: roles.User{ID: "313233"}, + Group: roles.Group{}, +} + +// SecondRoleAssignemnt is the second role assignemnt in the List request. +var SecondRoleAssignment = roles.RoleAssignment{ + Role: roles.AssignedRole{ID: "123456"}, + Scope: roles.Scope{Project: roles.Project{ID: "456789"}}, + User: roles.User{ID: "313233"}, + Group: roles.Group{}, +} + +// ExpectedRoleAssignmentsSlice is the slice of role assignments expected to be +// returned from ListAssignmentOutput. +var ExpectedRoleAssignmentsSlice = []roles.RoleAssignment{FirstRoleAssignment, SecondRoleAssignment} + +// HandleListRoleAssignmentsSuccessfully creates an HTTP handler at `/role_assignments` on the +// test handler mux that responds with a list of two role assignments. 
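+// Note that HandleAssignSuccessfully and HandleUnassignSuccessfully above
+// register literal placeholder paths such as
+// "/projects/{project_id}/users/{user_id}/roles/{role_id}". Those paths only
+// match because the tests pass the placeholder strings themselves as IDs, so
+// assignURL reproduces the registered path verbatim; a sketch mirroring
+// TestAssign:
+//
+//	roles.Assign(client.ServiceClient(), "{role_id}", roles.AssignOpts{
+//		UserID:    "{user_id}",
+//		ProjectID: "{project_id}",
+//	})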
+func HandleListRoleAssignmentsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/role_assignments", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListAssignmentOutput) + }) +} + +// RoleOnResource is the role in the ListAssignmentsOnResource request. +var RoleOnResource = roles.Role{ + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/roles/9fe1d3", + }, + Name: "support", + Extra: map[string]interface{}{ + "description": "read-only support role", + }, +} + +// ExpectedRolesOnResourceSlice is the slice of roles expected to be returned +// from ListAssignmentsOnResourceOutput. +var ExpectedRolesOnResourceSlice = []roles.Role{RoleOnResource} + +func HandleListAssignmentsOnResourceSuccessfully_ProjectsUsers(t *testing.T) { + fn := func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListAssignmentsOnResourceOutput) + } + + th.Mux.HandleFunc("/projects/{project_id}/users/{user_id}/roles", fn) +} + +func HandleListAssignmentsOnResourceSuccessfully_ProjectsGroups(t *testing.T) { + fn := func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListAssignmentsOnResourceOutput) + } + + th.Mux.HandleFunc("/projects/{project_id}/groups/{group_id}/roles", fn) +} + +func HandleListAssignmentsOnResourceSuccessfully_DomainsUsers(t *testing.T) { + fn := func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListAssignmentsOnResourceOutput) + } + + th.Mux.HandleFunc("/domains/{domain_id}/users/{user_id}/roles", fn) +} + +func HandleListAssignmentsOnResourceSuccessfully_DomainsGroups(t *testing.T) { + fn := func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListAssignmentsOnResourceOutput) + } + + th.Mux.HandleFunc("/domains/{domain_id}/groups/{group_id}/roles", fn) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/requests_test.go new file mode 100644 index 000000000000..c8ac5a9b0321 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/testing/requests_test.go @@ -0,0 +1,296 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/roles" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func 
TestListRoles(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListRolesSuccessfully(t) + + count := 0 + err := roles.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := roles.ExtractRoles(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedRolesSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListRolesAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListRolesSuccessfully(t) + + allPages, err := roles.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := roles.ExtractRoles(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRolesSlice, actual) + th.AssertEquals(t, ExpectedRolesSlice[1].Extra["description"], "read-only support role") +} + +func TestListUsersFiltersCheck(t *testing.T) { + type test struct { + filterName string + wantErr bool + } + tests := []test{ + {"foo__contains", false}, + {"foo", true}, + {"foo_contains", true}, + {"foo__", true}, + {"__foo", true}, + } + + var listOpts roles.ListOpts + for _, _test := range tests { + listOpts.Filters = map[string]string{_test.filterName: "bar"} + _, err := listOpts.ToRoleListQuery() + + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case roles.InvalidListFilter: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestGetRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetRoleSuccessfully(t) + + actual, err := roles.Get(client.ServiceClient(), "9fe1d3").Extract() + + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondRole, *actual) +} + +func TestCreateRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateRoleSuccessfully(t) + + createOpts := roles.CreateOpts{ + Name: "support", + DomainID: "1789d1", + Extra: map[string]interface{}{ + "description": "read-only support role", + }, + } + + actual, err := roles.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondRole, *actual) +} + +func TestUpdateRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateRoleSuccessfully(t) + + updateOpts := roles.UpdateOpts{ + Extra: map[string]interface{}{ + "description": "admin read-only support role", + }, + } + + actual, err := roles.Update(client.ServiceClient(), "9fe1d3", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondRoleUpdated, *actual) +} + +func TestDeleteRole(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteRoleSuccessfully(t) + + res := roles.Delete(client.ServiceClient(), "9fe1d3") + th.AssertNoErr(t, res.Err) +} + +func TestListAssignmentsSinglePage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListRoleAssignmentsSuccessfully(t) + + count := 0 + err := roles.ListAssignments(client.ServiceClient(), roles.ListAssignmentsOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := roles.ExtractRoleAssignments(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedRoleAssignmentsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAssignmentsOnResource_ProjectsUsers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListAssignmentsOnResourceSuccessfully_ProjectsUsers(t) + + count := 0 + err 
:= roles.ListAssignmentsOnResource(client.ServiceClient(), roles.ListAssignmentsOnResourceOpts{ + UserID: "{user_id}", + ProjectID: "{project_id}", + }).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := roles.ExtractRoles(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRolesOnResourceSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAssignmentsOnResource_DomainsUsers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListAssignmentsOnResourceSuccessfully_DomainsUsers(t) + + count := 0 + err := roles.ListAssignmentsOnResource(client.ServiceClient(), roles.ListAssignmentsOnResourceOpts{ + UserID: "{user_id}", + DomainID: "{domain_id}", + }).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := roles.ExtractRoles(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRolesOnResourceSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAssignmentsOnResource_ProjectsGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListAssignmentsOnResourceSuccessfully_ProjectsGroups(t) + + count := 0 + err := roles.ListAssignmentsOnResource(client.ServiceClient(), roles.ListAssignmentsOnResourceOpts{ + GroupID: "{group_id}", + ProjectID: "{project_id}", + }).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := roles.ExtractRoles(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRolesOnResourceSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAssignmentsOnResource_DomainsGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListAssignmentsOnResourceSuccessfully_DomainsGroups(t) + + count := 0 + err := roles.ListAssignmentsOnResource(client.ServiceClient(), roles.ListAssignmentsOnResourceOpts{ + GroupID: "{group_id}", + DomainID: "{domain_id}", + }).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := roles.ExtractRoles(page) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedRolesOnResourceSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestAssign(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAssignSuccessfully(t) + + err := roles.Assign(client.ServiceClient(), "{role_id}", roles.AssignOpts{ + UserID: "{user_id}", + ProjectID: "{project_id}", + }).ExtractErr() + th.AssertNoErr(t, err) + + err = roles.Assign(client.ServiceClient(), "{role_id}", roles.AssignOpts{ + UserID: "{user_id}", + DomainID: "{domain_id}", + }).ExtractErr() + th.AssertNoErr(t, err) + + err = roles.Assign(client.ServiceClient(), "{role_id}", roles.AssignOpts{ + GroupID: "{group_id}", + ProjectID: "{project_id}", + }).ExtractErr() + th.AssertNoErr(t, err) + + err = roles.Assign(client.ServiceClient(), "{role_id}", roles.AssignOpts{ + GroupID: "{group_id}", + DomainID: "{domain_id}", + }).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUnassign(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUnassignSuccessfully(t) + + err := roles.Unassign(client.ServiceClient(), "{role_id}", roles.UnassignOpts{ + UserID: "{user_id}", + ProjectID: "{project_id}", + }).ExtractErr() + th.AssertNoErr(t, err) + + err = roles.Unassign(client.ServiceClient(), "{role_id}", roles.UnassignOpts{ + UserID: "{user_id}", + DomainID: "{domain_id}", + }).ExtractErr() + 
th.AssertNoErr(t, err) + + err = roles.Unassign(client.ServiceClient(), "{role_id}", roles.UnassignOpts{ + GroupID: "{group_id}", + ProjectID: "{project_id}", + }).ExtractErr() + th.AssertNoErr(t, err) + + err = roles.Unassign(client.ServiceClient(), "{role_id}", roles.UnassignOpts{ + GroupID: "{group_id}", + DomainID: "{domain_id}", + }).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/urls.go new file mode 100644 index 000000000000..2b82011424b0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/roles/urls.go @@ -0,0 +1,39 @@ +package roles + +import "github.com/gophercloud/gophercloud" + +const ( + rolePath = "roles" +) + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(rolePath) +} + +func getURL(client *gophercloud.ServiceClient, roleID string) string { + return client.ServiceURL(rolePath, roleID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(rolePath) +} + +func updateURL(client *gophercloud.ServiceClient, roleID string) string { + return client.ServiceURL(rolePath, roleID) +} + +func deleteURL(client *gophercloud.ServiceClient, roleID string) string { + return client.ServiceURL(rolePath, roleID) +} + +func listAssignmentsURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("role_assignments") +} + +func listAssignmentsOnResourceURL(client *gophercloud.ServiceClient, targetType, targetID, actorType, actorID string) string { + return client.ServiceURL(targetType, targetID, actorType, actorID, rolePath) +} + +func assignURL(client *gophercloud.ServiceClient, targetType, targetID, actorType, actorID, roleID string) string { + return client.ServiceURL(targetType, targetID, actorType, actorID, rolePath, roleID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/doc.go new file mode 100644 index 000000000000..81702359ac8d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/doc.go @@ -0,0 +1,66 @@ +/* +Package services provides information and interaction with the services API +resource for the OpenStack Identity service. 
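+
+Example to Get a Service (a minimal sketch, reusing the identityClient and
+the serviceID conventions from the examples that follow)
+
+	serviceID := "3c7bbe9a6ecb453ca1789586291380ed"
+
+	service, err := services.Get(identityClient, serviceID).Extract()
+	if err != nil {
+		panic(err)
+	}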
+ +Example to List Services + + listOpts := services.ListOpts{ + ServiceType: "compute", + } + + allPages, err := services.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allServices, err := services.ExtractServices(allPages) + if err != nil { + panic(err) + } + + for _, service := range allServices { + fmt.Printf("%+v\n", service) + } + +Example to Create a Service + + createOpts := services.CreateOpts{ + Type: "compute", + Extra: map[string]interface{}{ + "name": "compute-service", + "description": "Compute Service", + }, + } + + service, err := services.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Service + + serviceID := "3c7bbe9a6ecb453ca1789586291380ed" + + var iFalse bool = false + updateOpts := services.UpdateOpts{ + Enabled: &iFalse, + Extra: map[string]interface{}{ + "description": "Disabled Compute Service" + }, + } + + service, err := services.Update(identityClient, serviceID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Service + + serviceID := "3c7bbe9a6ecb453ca1789586291380ed" + err := services.Delete(identityClient, serviceID).ExtractErr() + if err != nil { + panic(err) + } + +*/ +package services diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/requests.go new file mode 100644 index 000000000000..1a8b35232a1c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/requests.go @@ -0,0 +1,154 @@ +package services + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToServiceCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a service. +type CreateOpts struct { + // Type is the type of the service. + Type string `json:"type"` + + // Enabled is whether or not the service is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the service. + Extra map[string]interface{} `json:"-"` +} + +// ToServiceCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToServiceCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "service") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["service"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create adds a new service of the requested type to the catalog. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToServiceCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// ListOptsBuilder enables extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToServiceListMap() (string, error) +} + +// ListOpts provides options for filtering the List results. +type ListOpts struct { + // ServiceType filter the response by a type of service. + ServiceType string `q:"type"` + + // Name filters the response by a service name. 
+ Name string `q:"name"` +} + +// ToServiceListMap builds a list query from the list options. +func (opts ListOpts) ToServiceListMap() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List enumerates the services available to a specific user. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToServiceListMap() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ServicePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get returns additional information about a service, given its ID. +func Get(client *gophercloud.ServiceClient, serviceID string) (r GetResult) { + _, r.Err = client.Get(serviceURL(client, serviceID), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToServiceUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a service. +type UpdateOpts struct { + // Type is the type of the service. + Type string `json:"type"` + + // Enabled is whether or not the service is enabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the service. + Extra map[string]interface{} `json:"-"` +} + +// ToServiceUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToServiceUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "service") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["service"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing Service. +func Update(client *gophercloud.ServiceClient, serviceID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToServiceUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, serviceID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete removes an existing service. +// It either deletes all associated endpoints, or fails until all endpoints +// are deleted. +func Delete(client *gophercloud.ServiceClient, serviceID string) (r DeleteResult) { + _, r.Err = client.Delete(serviceURL(client, serviceID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/results.go new file mode 100644 index 000000000000..f3484d7a9677 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/results.go @@ -0,0 +1,131 @@ +package services + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +type serviceResult struct { + gophercloud.Result +} + +// Extract interprets a GetResult, CreateResult or UpdateResult as a concrete +// Service. An error is returned if the original call or the extraction failed. 
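+//
+// Keys of the "service" object that do not correspond to a named Service
+// field (whether nested under "extra" or sent at the top level) are collected
+// into the Extra map by UnmarshalJSON below. A rough sketch of the effect,
+// assuming r is the GetResult for the GetOutput fixture used in the tests:
+//
+//	service, _ := r.Extract()
+//	_ = service.Type                 // "compute"
+//	_ = service.Extra["description"] // "Service Two"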
+func (r serviceResult) Extract() (*Service, error) { + var s struct { + Service *Service `json:"service"` + } + err := r.ExtractInto(&s) + return s.Service, err +} + +// CreateResult is the response from a Create request. Call its Extract method +// to interpret it as a Service. +type CreateResult struct { + serviceResult +} + +// GetResult is the response from a Get request. Call its Extract method +// to interpret it as a Service. +type GetResult struct { + serviceResult +} + +// UpdateResult is the response from an Update request. Call its Extract method +// to interpret it as a Service. +type UpdateResult struct { + serviceResult +} + +// DeleteResult is the response from a Delete request. Call its ExtractErr +// method to interpret it as a Service. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Service represents an OpenStack Service. +type Service struct { + // ID is the unique ID of the service. + ID string `json:"id"` + + // Type is the type of the service. + Type string `json:"type"` + + // Enabled is whether or not the service is enabled. + Enabled bool `json:"enabled"` + + // Links contains referencing links to the service. + Links map[string]interface{} `json:"links"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` +} + +func (r *Service) UnmarshalJSON(b []byte) error { + type tmp Service + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Service(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(Service{}, resultMap) + } + } + + return err +} + +// ServicePage is a single page of Service results. +type ServicePage struct { + pagination.LinkedPageBase +} + +// IsEmpty returns true if the ServicePage contains no results. +func (p ServicePage) IsEmpty() (bool, error) { + services, err := ExtractServices(p) + return len(services) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r ServicePage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractServices extracts a slice of Services from a Collection acquired +// from List. 
+func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Services []Service `json:"services"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Services, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/doc.go new file mode 100644 index 000000000000..68b21e970e23 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/doc.go @@ -0,0 +1,2 @@ +// services unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/fixtures.go new file mode 100644 index 000000000000..fd1d632425d9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/fixtures.go @@ -0,0 +1,209 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of Service results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null + }, + "services": [ + { + "id": "1234", + "links": { + "self": "https://example.com/identity/v3/services/1234" + }, + "type": "identity", + "enabled": false, + "extra": { + "name": "service-one", + "description": "Service One" + } + }, + { + "id": "9876", + "links": { + "self": "https://example.com/identity/v3/services/9876" + }, + "type": "compute", + "enabled": false, + "extra": { + "name": "service-two", + "description": "Service Two", + "email": "service@example.com" + } + } + ] +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "service": { + "id": "9876", + "links": { + "self": "https://example.com/identity/v3/services/9876" + }, + "type": "compute", + "enabled": false, + "extra": { + "name": "service-two", + "description": "Service Two", + "email": "service@example.com" + } + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "service": { + "description": "Service Two", + "email": "service@example.com", + "name": "service-two", + "type": "compute" + } +} +` + +// UpdateRequest provides the input to as Update request. +const UpdateRequest = ` +{ + "service": { + "type": "compute2", + "description": "Service Two Updated" + } +} +` + +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "service": { + "id": "9876", + "links": { + "self": "https://example.com/identity/v3/services/9876" + }, + "type": "compute2", + "enabled": false, + "extra": { + "name": "service-two", + "description": "Service Two Updated", + "email": "service@example.com" + } + } +} +` + +// FirstService is the first service in the List request. +var FirstService = services.Service{ + ID: "1234", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/services/1234", + }, + Type: "identity", + Enabled: false, + Extra: map[string]interface{}{ + "name": "service-one", + "description": "Service One", + }, +} + +// SecondService is the second service in the List request. 
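+// SecondService is also the value TestCreateSuccessful expects back; the
+// matching CreateRequest above is what ToServiceCreateMap builds once the
+// Extra keys are folded into the "service" object, roughly from:
+//
+//	services.CreateOpts{
+//		Type: "compute",
+//		Extra: map[string]interface{}{
+//			"name":        "service-two",
+//			"description": "Service Two",
+//			"email":       "service@example.com",
+//		},
+//	}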
+var SecondService = services.Service{ + ID: "9876", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/services/9876", + }, + Type: "compute", + Enabled: false, + Extra: map[string]interface{}{ + "name": "service-two", + "description": "Service Two", + "email": "service@example.com", + }, +} + +// SecondServiceUpdated is the SecondService should look after an Update. +var SecondServiceUpdated = services.Service{ + ID: "9876", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/services/9876", + }, + Type: "compute2", + Enabled: false, + Extra: map[string]interface{}{ + "name": "service-two", + "description": "Service Two Updated", + "email": "service@example.com", + }, +} + +// ExpectedServicesSlice is the slice of services to be returned from ListOutput. +var ExpectedServicesSlice = []services.Service{FirstService, SecondService} + +// HandleListServicesSuccessfully creates an HTTP handler at `/services` on the +// test handler mux that responds with a list of two services. +func HandleListServicesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetServiceSuccessfully creates an HTTP handler at `/services` on the +// test handler mux that responds with a single service. +func HandleGetServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/9876", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleCreateServiceSuccessfully creates an HTTP handler at `/services` on the +// test handler mux that tests service creation. +func HandleCreateServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetOutput) + }) +} + +// HandleUpdateServiceSuccessfully creates an HTTP handler at `/services` on the +// test handler mux that tests service update. 
+func HandleUpdateServiceSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/services/9876", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, UpdateRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, UpdateOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/requests_test.go new file mode 100644 index 000000000000..2a8a89dd2417 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/testing/requests_test.go @@ -0,0 +1,107 @@ +package testing + +import ( + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/services" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateSuccessful(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateServiceSuccessfully(t) + + createOpts := services.CreateOpts{ + Type: "compute", + Extra: map[string]interface{}{ + "name": "service-two", + "description": "Service Two", + "email": "service@example.com", + }, + } + + actual, err := services.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondService, *actual) +} + +func TestListServices(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListServicesSuccessfully(t) + + count := 0 + err := services.List(client.ServiceClient(), services.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := services.ExtractServices(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedServicesSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListServicesAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListServicesSuccessfully(t) + + allPages, err := services.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := services.ExtractServices(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedServicesSlice, actual) + th.AssertEquals(t, ExpectedServicesSlice[0].Extra["name"], "service-one") + th.AssertEquals(t, ExpectedServicesSlice[1].Extra["email"], "service@example.com") +} + +func TestGetSuccessful(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetServiceSuccessfully(t) + + actual, err := services.Get(client.ServiceClient(), "9876").Extract() + + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondService, *actual) + th.AssertEquals(t, SecondService.Extra["email"], "service@example.com") +} + +func TestUpdateSuccessful(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateServiceSuccessfully(t) + + updateOpts := services.UpdateOpts{ + Type: "compute2", + Extra: map[string]interface{}{ + "description": "Service Two Updated", + }, + } + actual, err := services.Update(client.ServiceClient(), "9876", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondServiceUpdated, *actual) + th.AssertEquals(t, SecondServiceUpdated.Extra["description"], "Service Two Updated") +} + +func TestDeleteSuccessful(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/services/12345", func(w http.ResponseWriter, r *http.Request) { + 
th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := services.Delete(client.ServiceClient(), "12345") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/urls.go new file mode 100644 index 000000000000..caa625a20838 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/services/urls.go @@ -0,0 +1,19 @@ +package services + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("services") +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("services") +} + +func serviceURL(client *gophercloud.ServiceClient, serviceID string) string { + return client.ServiceURL("services", serviceID) +} + +func updateURL(client *gophercloud.ServiceClient, serviceID string) string { + return client.ServiceURL("services", serviceID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go new file mode 100644 index 000000000000..966e128f1281 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/doc.go @@ -0,0 +1,108 @@ +/* +Package tokens provides information and interaction with the token API +resource for the OpenStack Identity service. + +For more information, see: +http://developer.openstack.org/api-ref-identity-v3.html#tokens-v3 + +Example to Create a Token From a Username and Password + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Username, Password, and Domain + + authOptions := tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainID: "default", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + + authOptions = tokens.AuthOptions{ + UserID: "username", + Password: "password", + DomainName: "default", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token From a Token + + authOptions := tokens.AuthOptions{ + TokenID: "token_id", + } + + token, err := tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project ID Scope + + scope := tokens.Scope{ + ProjectID: "0fe36e73809d46aeae6705c39077b1b3", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Domain ID Scope + + scope := tokens.Scope{ + DomainID: "default", + } + + authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +Example to Create a Token from a Username and Password with Project Name Scope + + scope := tokens.Scope{ + ProjectName: "project_name", + DomainID: "default", + } + + 
authOptions := tokens.AuthOptions{ + Scope: &scope, + UserID: "username", + Password: "password", + } + + token, err = tokens.Create(identityClient, authOptions).ExtractToken() + if err != nil { + panic(err) + } + +*/ +package tokens diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go new file mode 100644 index 000000000000..6e99a793c53a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/requests.go @@ -0,0 +1,153 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +// Scope allows a created token to be limited to a specific domain or project. +type Scope struct { + ProjectID string + ProjectName string + DomainID string + DomainName string +} + +// AuthOptionsBuilder provides the ability for extensions to add additional +// parameters to AuthOptions. Extensions must satisfy all required methods. +type AuthOptionsBuilder interface { + // ToTokenV3CreateMap assembles the Create request body, returning an error + // if parameters are missing or inconsistent. + ToTokenV3CreateMap(map[string]interface{}) (map[string]interface{}, error) + ToTokenV3ScopeMap() (map[string]interface{}, error) + CanReauth() bool +} + +// AuthOptions represents options for authenticating a user. +type AuthOptions struct { + // IdentityEndpoint specifies the HTTP endpoint that is required to work with + // the Identity API of the appropriate version. While it's ultimately needed + // by all of the identity services, it will often be populated by a + // provider-level function. + IdentityEndpoint string `json:"-"` + + // Username is required if using Identity V2 API. Consult with your provider's + // control panel to discover your account's username. In Identity V3, either + // UserID or a combination of Username and DomainID or DomainName are needed. + Username string `json:"username,omitempty"` + UserID string `json:"id,omitempty"` + + Password string `json:"password,omitempty"` + + // At most one of DomainID and DomainName must be provided if using Username + // with Identity V3. Otherwise, either are optional. + DomainID string `json:"-"` + DomainName string `json:"name,omitempty"` + + // AllowReauth should be set to true if you grant permission for Gophercloud + // to cache your credentials in memory, and to allow Gophercloud to attempt + // to re-authenticate automatically if/when your token expires. If you set + // it to false, it will not cache these settings, but re-authentication will + // not be possible. This setting defaults to false. + AllowReauth bool `json:"-"` + + // TokenID allows users to authenticate (possibly as another user) with an + // authentication token ID. + TokenID string `json:"-"` + + Scope Scope `json:"-"` +} + +// ToTokenV3CreateMap builds a request body from AuthOptions. +func (opts *AuthOptions) ToTokenV3CreateMap(scope map[string]interface{}) (map[string]interface{}, error) { + gophercloudAuthOpts := gophercloud.AuthOptions{ + Username: opts.Username, + UserID: opts.UserID, + Password: opts.Password, + DomainID: opts.DomainID, + DomainName: opts.DomainName, + AllowReauth: opts.AllowReauth, + TokenID: opts.TokenID, + } + + return gophercloudAuthOpts.ToTokenV3CreateMap(scope) +} + +// ToTokenV3CreateMap builds a scope request body from AuthOptions. 
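+//
+// Create below calls this first to build the scope section, then
+// ToTokenV3CreateMap for the full request body, and finally POSTs it with an
+// empty X-Auth-Token header. A minimal sketch of issuing a token, assuming an
+// unscoped Identity v3 ServiceClient named client:
+//
+//	result := tokens.Create(client, &tokens.AuthOptions{
+//		UserID:   "user_id",
+//		Password: "password",
+//	})
+//	token, err := result.ExtractToken() // token.ID is read from X-Subject-Token
+//	catalog, _ := result.ExtractServiceCatalog()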
+func (opts *AuthOptions) ToTokenV3ScopeMap() (map[string]interface{}, error) { + scope := gophercloud.AuthScope(opts.Scope) + + gophercloudAuthOpts := gophercloud.AuthOptions{ + Scope: &scope, + DomainID: opts.DomainID, + DomainName: opts.DomainName, + } + + return gophercloudAuthOpts.ToTokenV3ScopeMap() +} + +func (opts *AuthOptions) CanReauth() bool { + return opts.AllowReauth +} + +func subjectTokenHeaders(c *gophercloud.ServiceClient, subjectToken string) map[string]string { + return map[string]string{ + "X-Subject-Token": subjectToken, + } +} + +// Create authenticates and either generates a new token, or changes the Scope +// of an existing token. +func Create(c *gophercloud.ServiceClient, opts AuthOptionsBuilder) (r CreateResult) { + scope, err := opts.ToTokenV3ScopeMap() + if err != nil { + r.Err = err + return + } + + b, err := opts.ToTokenV3CreateMap(scope) + if err != nil { + r.Err = err + return + } + + resp, err := c.Post(tokenURL(c), b, &r.Body, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"X-Auth-Token": ""}, + }) + r.Err = err + if resp != nil { + r.Header = resp.Header + } + return +} + +// Get validates and retrieves information about another token. +func Get(c *gophercloud.ServiceClient, token string) (r GetResult) { + resp, err := c.Get(tokenURL(c), &r.Body, &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 203}, + }) + if resp != nil { + r.Err = err + r.Header = resp.Header + } + return +} + +// Validate determines if a specified token is valid or not. +func Validate(c *gophercloud.ServiceClient, token string) (bool, error) { + resp, err := c.Head(tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + OkCodes: []int{200, 204, 404}, + }) + if err != nil { + return false, err + } + + return resp.StatusCode == 200 || resp.StatusCode == 204, nil +} + +// Revoke immediately makes specified token invalid. +func Revoke(c *gophercloud.ServiceClient, token string) (r RevokeResult) { + _, r.Err = c.Delete(tokenURL(c), &gophercloud.RequestOpts{ + MoreHeaders: subjectTokenHeaders(c, token), + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go new file mode 100644 index 000000000000..ebdca58f658f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/results.go @@ -0,0 +1,171 @@ +package tokens + +import ( + "time" + + "github.com/gophercloud/gophercloud" +) + +// Endpoint represents a single API endpoint offered by a service. +// It matches either a public, internal or admin URL. +// If supported, it contains a region specifier, again if provided. +// The significance of the Region field will depend upon your provider. +type Endpoint struct { + ID string `json:"id"` + Region string `json:"region"` + RegionID string `json:"region_id"` + Interface string `json:"interface"` + URL string `json:"url"` +} + +// CatalogEntry provides a type-safe interface to an Identity API V3 service +// catalog listing. Each class of service, such as cloud DNS or block storage +// services, could have multiple CatalogEntry representing it (one by interface +// type, e.g public, admin or internal). +// +// Note: when looking for the desired service, try, whenever possible, to key +// off the type field. Otherwise, you'll tie the representation of the service +// to a specific provider. 
+type CatalogEntry struct { + // Service ID + ID string `json:"id"` + + // Name will contain the provider-specified name for the service. + Name string `json:"name"` + + // Type will contain a type string if OpenStack defines a type for the + // service. Otherwise, for provider-specific services, the provider may + // assign their own type strings. + Type string `json:"type"` + + // Endpoints will let the caller iterate over all the different endpoints that + // may exist for the service. + Endpoints []Endpoint `json:"endpoints"` +} + +// ServiceCatalog provides a view into the service catalog from a previous, +// successful authentication. +type ServiceCatalog struct { + Entries []CatalogEntry `json:"catalog"` +} + +// Domain provides information about the domain to which this token grants +// access. +type Domain struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// User represents a user resource that exists in the Identity Service. +type User struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// Role provides information about roles to which User is authorized. +type Role struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// Project provides information about project to which User is authorized. +type Project struct { + Domain Domain `json:"domain"` + ID string `json:"id"` + Name string `json:"name"` +} + +// commonResult is the response from a request. A commonResult has various +// methods which can be used to extract different details about the result. +type commonResult struct { + gophercloud.Result +} + +// Extract is a shortcut for ExtractToken. +// This function is deprecated and still present for backward compatibility. +func (r commonResult) Extract() (*Token, error) { + return r.ExtractToken() +} + +// ExtractToken interprets a commonResult as a Token. +func (r commonResult) ExtractToken() (*Token, error) { + var s Token + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + // Parse the token itself from the stored headers. + s.ID = r.Header.Get("X-Subject-Token") + + return &s, err +} + +// ExtractServiceCatalog returns the ServiceCatalog that was generated along +// with the user's Token. +func (r commonResult) ExtractServiceCatalog() (*ServiceCatalog, error) { + var s ServiceCatalog + err := r.ExtractInto(&s) + return &s, err +} + +// ExtractUser returns the User that is the owner of the Token. +func (r commonResult) ExtractUser() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// ExtractRoles returns Roles to which User is authorized. +func (r commonResult) ExtractRoles() ([]Role, error) { + var s struct { + Roles []Role `json:"roles"` + } + err := r.ExtractInto(&s) + return s.Roles, err +} + +// ExtractProject returns Project to which User is authorized. +func (r commonResult) ExtractProject() (*Project, error) { + var s struct { + Project *Project `json:"project"` + } + err := r.ExtractInto(&s) + return s.Project, err +} + +// CreateResult is the response from a Create request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type CreateResult struct { + commonResult +} + +// GetResult is the response from a Get request. Use ExtractToken() +// to interpret it as a Token, or ExtractServiceCatalog() to interpret it +// as a service catalog. +type GetResult struct { + commonResult +} + +// RevokeResult is response from a Revoke request. 
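+// A Revoke call normally returns no body, so checking the embedded Err is
+// enough. A rough sketch of revoking and re-checking a token, assuming an
+// authenticated ServiceClient c and a subject token string id:
+//
+//	res := tokens.Revoke(c, id)
+//	if res.Err == nil {
+//		valid, _ := tokens.Validate(c, id)
+//		_ = valid // expected to be false once the token is revoked
+//	}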
+type RevokeResult struct { + commonResult +} + +// Token is a string that grants a user access to a controlled set of services +// in an OpenStack provider. Each Token is valid for a set length of time. +type Token struct { + // ID is the issued token. + ID string `json:"id"` + + // ExpiresAt is the timestamp at which this token will no longer be accepted. + ExpiresAt time.Time `json:"expires_at"` +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.ExtractIntoStructPtr(v, "token") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/doc.go new file mode 100644 index 000000000000..a7955a717ef3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/doc.go @@ -0,0 +1,2 @@ +// tokens unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/fixtures.go new file mode 100644 index 000000000000..e6f44178a451 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/fixtures.go @@ -0,0 +1,217 @@ +package testing + +import ( + "encoding/json" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/testhelper" +) + +const testTokenID = "130f6c17-420e-4a0b-97b0-0c9cf2a05f30" + +// TokenOutput is a sample response to a Token call. +const TokenOutput = ` +{ + "token":{ + "is_domain":false, + "methods":[ + "password" + ], + "roles":[ + { + "id":"434426788d5a451faf763b0e6db5aefb", + "name":"admin" + } + ], + "expires_at":"2017-06-03T02:19:49.000000Z", + "project":{ + "domain":{ + "id":"default", + "name":"Default" + }, + "id":"a99e9b4e620e4db09a2dfb6e42a01e66", + "name":"admin" + }, + "catalog":[ + { + "endpoints":[ + { + "url":"http://127.0.0.1:8774/v2.1/a99e9b4e620e4db09a2dfb6e42a01e66", + "interface":"admin", + "region":"RegionOne", + "region_id":"RegionOne", + "id":"3eac9e7588eb4eb2a4650cf5e079505f" + }, + { + "url":"http://127.0.0.1:8774/v2.1/a99e9b4e620e4db09a2dfb6e42a01e66", + "interface":"internal", + "region":"RegionOne", + "region_id":"RegionOne", + "id":"6b33fabc69c34ea782a3f6282582b59f" + }, + { + "url":"http://127.0.0.1:8774/v2.1/a99e9b4e620e4db09a2dfb6e42a01e66", + "interface":"public", + "region":"RegionOne", + "region_id":"RegionOne", + "id":"dae63c71bee24070a71f5425e7a916b5" + } + ], + "type":"compute", + "id":"17e0fa04647d4155a7933ee624dd66da", + "name":"nova" + }, + { + "endpoints":[ + { + "url":"http://127.0.0.1:35357/v3", + "interface":"admin", + "region":"RegionOne", + "region_id":"RegionOne", + "id":"0539aeff80954a0bb756cec496768d3d" + }, + { + "url":"http://127.0.0.1:5000/v3", + "interface":"public", + "region":"RegionOne", + "region_id":"RegionOne", + "id":"15bdf2d0853e4c939993d29548b1b56f" + }, + { + "url":"http://127.0.0.1:5000/v3", + "interface":"internal", + "region":"RegionOne", + "region_id":"RegionOne", + "id":"3b4423c54ba343c58226bc424cb11c4b" + } + ], + "type":"identity", + "id":"1cde0ea8cb3c49d8928cb172ca825ca5", + "name":"keystone" + } + ], + "user":{ + "domain":{ + "id":"default", + "name":"Default" + }, + "password_expires_at":null, + "name":"admin", + "id":"0fe36e73809d46aeae6705c39077b1b3" + }, + "audit_ids":[ + "ysSI0bEWR0Gmrp4LHL9LFw" + ], + 
"issued_at":"2017-06-03T01:19:49.000000Z" + } +}` + +var expectedTokenTime, _ = time.Parse(gophercloud.RFC3339Milli, + "2017-06-03T02:19:49.000000Z") +var ExpectedToken = tokens.Token{ + ID: testTokenID, + ExpiresAt: expectedTokenTime, +} + +var catalogEntry1 = tokens.CatalogEntry{ + ID: "17e0fa04647d4155a7933ee624dd66da", + Name: "nova", + Type: "compute", + Endpoints: []tokens.Endpoint{ + tokens.Endpoint{ + ID: "3eac9e7588eb4eb2a4650cf5e079505f", + Region: "RegionOne", + RegionID: "RegionOne", + Interface: "admin", + URL: "http://127.0.0.1:8774/v2.1/a99e9b4e620e4db09a2dfb6e42a01e66", + }, + tokens.Endpoint{ + ID: "6b33fabc69c34ea782a3f6282582b59f", + Region: "RegionOne", + RegionID: "RegionOne", + Interface: "internal", + URL: "http://127.0.0.1:8774/v2.1/a99e9b4e620e4db09a2dfb6e42a01e66", + }, + tokens.Endpoint{ + ID: "dae63c71bee24070a71f5425e7a916b5", + Region: "RegionOne", + RegionID: "RegionOne", + Interface: "public", + URL: "http://127.0.0.1:8774/v2.1/a99e9b4e620e4db09a2dfb6e42a01e66", + }, + }, +} +var catalogEntry2 = tokens.CatalogEntry{ + ID: "1cde0ea8cb3c49d8928cb172ca825ca5", + Name: "keystone", + Type: "identity", + Endpoints: []tokens.Endpoint{ + tokens.Endpoint{ + ID: "0539aeff80954a0bb756cec496768d3d", + Region: "RegionOne", + RegionID: "RegionOne", + Interface: "admin", + URL: "http://127.0.0.1:35357/v3", + }, + tokens.Endpoint{ + ID: "15bdf2d0853e4c939993d29548b1b56f", + Region: "RegionOne", + RegionID: "RegionOne", + Interface: "public", + URL: "http://127.0.0.1:5000/v3", + }, + tokens.Endpoint{ + ID: "3b4423c54ba343c58226bc424cb11c4b", + Region: "RegionOne", + RegionID: "RegionOne", + Interface: "internal", + URL: "http://127.0.0.1:5000/v3", + }, + }, +} + +// ExpectedServiceCatalog contains expected service extracted from token response. +var ExpectedServiceCatalog = tokens.ServiceCatalog{ + Entries: []tokens.CatalogEntry{catalogEntry1, catalogEntry2}, +} + +var domain = tokens.Domain{ + ID: "default", + Name: "Default", +} + +// ExpectedUser contains expected user extracted from token response. +var ExpectedUser = tokens.User{ + Domain: domain, + ID: "0fe36e73809d46aeae6705c39077b1b3", + Name: "admin", +} + +var role = tokens.Role{ + ID: "434426788d5a451faf763b0e6db5aefb", + Name: "admin", +} + +// ExpectedRoles contains expected roles extracted from token response. +var ExpectedRoles = []tokens.Role{role} + +// ExpectedProject contains expected project extracted from token response. 
+var ExpectedProject = tokens.Project{ + Domain: domain, + ID: "a99e9b4e620e4db09a2dfb6e42a01e66", + Name: "admin", +} + +func getGetResult(t *testing.T) tokens.GetResult { + result := tokens.GetResult{} + result.Header = http.Header{ + "X-Subject-Token": []string{testTokenID}, + } + err := json.Unmarshal([]byte(TokenOutput), &result.Body) + testhelper.AssertNoErr(t, err) + return result +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/requests_test.go new file mode 100644 index 000000000000..3891ae4ab544 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/requests_test.go @@ -0,0 +1,551 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + "github.com/gophercloud/gophercloud/testhelper" +) + +// authTokenPost verifies that providing certain AuthOptions and Scope results in an expected JSON structure. +func authTokenPost(t *testing.T, options tokens.AuthOptions, scope *tokens.Scope, requestJSON string) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{}, + Endpoint: testhelper.Endpoint(), + } + + testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "POST") + testhelper.TestHeader(t, r, "Content-Type", "application/json") + testhelper.TestHeader(t, r, "Accept", "application/json") + testhelper.TestJSONRequest(t, r, requestJSON) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "token": { + "expires_at": "2014-10-02T13:45:00.000000Z" + } + }`) + }) + + if scope != nil { + options.Scope = *scope + } + + expected := &tokens.Token{ + ExpiresAt: time.Date(2014, 10, 2, 13, 45, 0, 0, time.UTC), + } + actual, err := tokens.Create(&client, &options).Extract() + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) +} + +func authTokenPostErr(t *testing.T, options tokens.AuthOptions, scope *tokens.Scope, includeToken bool, expectedErr error) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{}, + Endpoint: testhelper.Endpoint(), + } + if includeToken { + client.TokenID = "abcdef123456" + } + + if scope != nil { + options.Scope = *scope + } + + _, err := tokens.Create(&client, &options).Extract() + if err == nil { + t.Errorf("Create did NOT return an error") + } + if err != expectedErr { + t.Errorf("Create returned an unexpected error: wanted %v, got %v", expectedErr, err) + } +} + +func TestCreateUserIDAndPassword(t *testing.T) { + authTokenPost(t, tokens.AuthOptions{UserID: "me", Password: "squirrel!"}, nil, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { "id": "me", "password": "squirrel!" 
} + } + } + } + } + `) +} + +func TestCreateUsernameDomainIDPassword(t *testing.T) { + authTokenPost(t, tokens.AuthOptions{Username: "fakey", Password: "notpassword", DomainID: "abc123"}, nil, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "domain": { + "id": "abc123" + }, + "name": "fakey", + "password": "notpassword" + } + } + } + } + } + `) +} + +func TestCreateUsernameDomainNamePassword(t *testing.T) { + authTokenPost(t, tokens.AuthOptions{Username: "frank", Password: "swordfish", DomainName: "spork.net"}, nil, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "domain": { + "name": "spork.net" + }, + "name": "frank", + "password": "swordfish" + } + } + } + } + } + `) +} + +func TestCreateTokenID(t *testing.T) { + authTokenPost(t, tokens.AuthOptions{TokenID: "12345abcdef"}, nil, ` + { + "auth": { + "identity": { + "methods": ["token"], + "token": { + "id": "12345abcdef" + } + } + } + } + `) +} + +func TestCreateProjectIDScope(t *testing.T) { + options := tokens.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &tokens.Scope{ProjectID: "123456"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "project": { + "id": "123456" + } + } + } + } + `) +} + +func TestCreateDomainIDScope(t *testing.T) { + options := tokens.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &tokens.Scope{DomainID: "1000"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "domain": { + "id": "1000" + } + } + } + } + `) +} + +func TestCreateDomainNameScope(t *testing.T) { + options := tokens.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &tokens.Scope{DomainName: "evil-plans"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "domain": { + "name": "evil-plans" + } + } + } + } + `) +} + +func TestCreateProjectNameAndDomainIDScope(t *testing.T) { + options := tokens.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &tokens.Scope{ProjectName: "world-domination", DomainID: "1000"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "project": { + "domain": { + "id": "1000" + }, + "name": "world-domination" + } + } + } + } + `) +} + +func TestCreateProjectNameAndDomainNameScope(t *testing.T) { + options := tokens.AuthOptions{UserID: "fenris", Password: "g0t0h311"} + scope := &tokens.Scope{ProjectName: "world-domination", DomainName: "evil-plans"} + authTokenPost(t, options, scope, ` + { + "auth": { + "identity": { + "methods": ["password"], + "password": { + "user": { + "id": "fenris", + "password": "g0t0h311" + } + } + }, + "scope": { + "project": { + "domain": { + "name": "evil-plans" + }, + "name": "world-domination" + } + } + } + } + `) +} + +func TestCreateExtractsTokenFromResponse(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{}, + Endpoint: testhelper.Endpoint(), + } + + 
testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Subject-Token", "aaa111") + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ + "token": { + "expires_at": "2014-10-02T13:45:00.000000Z" + } + }`) + }) + + options := tokens.AuthOptions{UserID: "me", Password: "shhh"} + token, err := tokens.Create(&client, &options).Extract() + if err != nil { + t.Fatalf("Create returned an error: %v", err) + } + + if token.ID != "aaa111" { + t.Errorf("Expected token to be aaa111, but was %s", token.ID) + } +} + +func TestCreateFailureEmptyAuth(t *testing.T) { + authTokenPostErr(t, tokens.AuthOptions{}, nil, false, gophercloud.ErrMissingPassword{}) +} + +func TestCreateFailureTokenIDUsername(t *testing.T) { + authTokenPostErr(t, tokens.AuthOptions{Username: "something", TokenID: "12345"}, nil, true, gophercloud.ErrUsernameWithToken{}) +} + +func TestCreateFailureTokenIDUserID(t *testing.T) { + authTokenPostErr(t, tokens.AuthOptions{UserID: "something", TokenID: "12345"}, nil, true, gophercloud.ErrUserIDWithToken{}) +} + +func TestCreateFailureTokenIDDomainID(t *testing.T) { + authTokenPostErr(t, tokens.AuthOptions{DomainID: "something", TokenID: "12345"}, nil, true, gophercloud.ErrDomainIDWithToken{}) +} + +func TestCreateFailureTokenIDDomainName(t *testing.T) { + authTokenPostErr(t, tokens.AuthOptions{DomainName: "something", TokenID: "12345"}, nil, true, gophercloud.ErrDomainNameWithToken{}) +} + +func TestCreateFailureMissingUser(t *testing.T) { + options := tokens.AuthOptions{Password: "supersecure"} + authTokenPostErr(t, options, nil, false, gophercloud.ErrUsernameOrUserID{}) +} + +func TestCreateFailureBothUser(t *testing.T) { + options := tokens.AuthOptions{ + Password: "supersecure", + Username: "oops", + UserID: "redundancy", + } + authTokenPostErr(t, options, nil, false, gophercloud.ErrUsernameOrUserID{}) +} + +func TestCreateFailureMissingDomain(t *testing.T) { + options := tokens.AuthOptions{ + Password: "supersecure", + Username: "notuniqueenough", + } + authTokenPostErr(t, options, nil, false, gophercloud.ErrDomainIDOrDomainName{}) +} + +func TestCreateFailureBothDomain(t *testing.T) { + options := tokens.AuthOptions{ + Password: "supersecure", + Username: "someone", + DomainID: "hurf", + DomainName: "durf", + } + authTokenPostErr(t, options, nil, false, gophercloud.ErrDomainIDOrDomainName{}) +} + +func TestCreateFailureUserIDDomainID(t *testing.T) { + options := tokens.AuthOptions{ + UserID: "100", + Password: "stuff", + DomainID: "oops", + } + authTokenPostErr(t, options, nil, false, gophercloud.ErrDomainIDWithUserID{}) +} + +func TestCreateFailureUserIDDomainName(t *testing.T) { + options := tokens.AuthOptions{ + UserID: "100", + Password: "sssh", + DomainName: "oops", + } + authTokenPostErr(t, options, nil, false, gophercloud.ErrDomainNameWithUserID{}) +} + +func TestCreateFailureScopeProjectNameAlone(t *testing.T) { + options := tokens.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &tokens.Scope{ProjectName: "notenough"} + authTokenPostErr(t, options, scope, false, gophercloud.ErrScopeDomainIDOrDomainName{}) +} + +func TestCreateFailureScopeProjectNameAndID(t *testing.T) { + options := tokens.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &tokens.Scope{ProjectName: "whoops", ProjectID: "toomuch", DomainID: "1234"} + authTokenPostErr(t, options, scope, false, gophercloud.ErrScopeProjectIDOrProjectName{}) +} + +func TestCreateFailureScopeProjectIDAndDomainID(t *testing.T) { + options := 
tokens.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &tokens.Scope{ProjectID: "toomuch", DomainID: "notneeded"} + authTokenPostErr(t, options, scope, false, gophercloud.ErrScopeProjectIDAlone{}) +} + +func TestCreateFailureScopeProjectIDAndDomainNAme(t *testing.T) { + options := tokens.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &tokens.Scope{ProjectID: "toomuch", DomainName: "notneeded"} + authTokenPostErr(t, options, scope, false, gophercloud.ErrScopeProjectIDAlone{}) +} + +func TestCreateFailureScopeDomainIDAndDomainName(t *testing.T) { + options := tokens.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &tokens.Scope{DomainID: "toomuch", DomainName: "notneeded"} + authTokenPostErr(t, options, scope, false, gophercloud.ErrScopeDomainIDOrDomainName{}) +} + +/* +func TestCreateFailureEmptyScope(t *testing.T) { + options := tokens.AuthOptions{UserID: "myself", Password: "swordfish"} + scope := &tokens.Scope{} + authTokenPostErr(t, options, scope, false, gophercloud.ErrScopeEmpty{}) +} +*/ + +func TestGetRequest(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{ + TokenID: "12345abcdef", + }, + Endpoint: testhelper.Endpoint(), + } + + testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, "GET") + testhelper.TestHeader(t, r, "Content-Type", "") + testhelper.TestHeader(t, r, "Accept", "application/json") + testhelper.TestHeader(t, r, "X-Auth-Token", "12345abcdef") + testhelper.TestHeader(t, r, "X-Subject-Token", "abcdef12345") + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { "token": { "expires_at": "2014-08-29T13:10:01.000000Z" } } + `) + }) + + token, err := tokens.Get(&client, "abcdef12345").Extract() + if err != nil { + t.Errorf("Info returned an error: %v", err) + } + + expected, _ := time.Parse(time.UnixDate, "Fri Aug 29 13:10:01 UTC 2014") + if token.ExpiresAt != expected { + t.Errorf("Expected expiration time %s, but was %s", expected.Format(time.UnixDate), time.Time(token.ExpiresAt).Format(time.UnixDate)) + } +} + +func prepareAuthTokenHandler(t *testing.T, expectedMethod string, status int) gophercloud.ServiceClient { + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{ + TokenID: "12345abcdef", + }, + Endpoint: testhelper.Endpoint(), + } + + testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + testhelper.TestMethod(t, r, expectedMethod) + testhelper.TestHeader(t, r, "Content-Type", "") + testhelper.TestHeader(t, r, "Accept", "application/json") + testhelper.TestHeader(t, r, "X-Auth-Token", "12345abcdef") + testhelper.TestHeader(t, r, "X-Subject-Token", "abcdef12345") + + w.WriteHeader(status) + }) + + return client +} + +func TestValidateRequestSuccessful(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + client := prepareAuthTokenHandler(t, "HEAD", http.StatusNoContent) + + ok, err := tokens.Validate(&client, "abcdef12345") + if err != nil { + t.Errorf("Unexpected error from Validate: %v", err) + } + + if !ok { + t.Errorf("Validate returned false for a valid token") + } +} + +func TestValidateRequestFailure(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + client := prepareAuthTokenHandler(t, "HEAD", http.StatusNotFound) + + ok, err := tokens.Validate(&client, "abcdef12345") + if err != nil { + t.Errorf("Unexpected error from 
Validate: %v", err) + } + + if ok { + t.Errorf("Validate returned true for an invalid token") + } +} + +func TestValidateRequestError(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + client := prepareAuthTokenHandler(t, "HEAD", http.StatusMethodNotAllowed) + + _, err := tokens.Validate(&client, "abcdef12345") + if err == nil { + t.Errorf("Missing expected error from Validate") + } +} + +func TestRevokeRequestSuccessful(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + client := prepareAuthTokenHandler(t, "DELETE", http.StatusNoContent) + + res := tokens.Revoke(&client, "abcdef12345") + testhelper.AssertNoErr(t, res.Err) +} + +func TestRevokeRequestError(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + client := prepareAuthTokenHandler(t, "DELETE", http.StatusNotFound) + + res := tokens.Revoke(&client, "abcdef12345") + if res.Err == nil { + t.Errorf("Missing expected error from Revoke") + } +} + +func TestNoTokenInResponse(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + client := gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{}, + Endpoint: testhelper.Endpoint(), + } + + testhelper.Mux.HandleFunc("/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{}`) + }) + + options := tokens.AuthOptions{UserID: "me", Password: "squirrel!"} + _, err := tokens.Create(&client, &options).Extract() + testhelper.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/results_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/results_test.go new file mode 100644 index 000000000000..d55a538bc18b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/testing/results_test.go @@ -0,0 +1,52 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/testhelper" +) + +func TestExtractToken(t *testing.T) { + result := getGetResult(t) + + token, err := result.ExtractToken() + testhelper.AssertNoErr(t, err) + + testhelper.CheckDeepEquals(t, &ExpectedToken, token) +} + +func TestExtractCatalog(t *testing.T) { + result := getGetResult(t) + + catalog, err := result.ExtractServiceCatalog() + testhelper.AssertNoErr(t, err) + + testhelper.CheckDeepEquals(t, &ExpectedServiceCatalog, catalog) +} + +func TestExtractUser(t *testing.T) { + result := getGetResult(t) + + user, err := result.ExtractUser() + testhelper.AssertNoErr(t, err) + + testhelper.CheckDeepEquals(t, &ExpectedUser, user) +} + +func TestExtractRoles(t *testing.T) { + result := getGetResult(t) + + roles, err := result.ExtractRoles() + testhelper.AssertNoErr(t, err) + + testhelper.CheckDeepEquals(t, ExpectedRoles, roles) +} + +func TestExtractProject(t *testing.T) { + result := getGetResult(t) + + project, err := result.ExtractProject() + testhelper.AssertNoErr(t, err) + + testhelper.CheckDeepEquals(t, &ExpectedProject, project) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go new file mode 100644 index 000000000000..2f864a31c8bf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/tokens/urls.go @@ -0,0 +1,7 @@ +package tokens + +import "github.com/gophercloud/gophercloud" + +func tokenURL(c *gophercloud.ServiceClient) string { + return 
c.ServiceURL("auth", "tokens") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go new file mode 100644 index 000000000000..994ce71bcb92 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/doc.go @@ -0,0 +1,172 @@ +/* +Package users manages and retrieves Users in the OpenStack Identity Service. + +Example to List Users + + listOpts := users.ListOpts{ + DomainID: "default", + } + + allPages, err := users.List(identityClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allUsers, err := users.ExtractUsers(allPages) + if err != nil { + panic(err) + } + + for _, user := range allUsers { + fmt.Printf("%+v\n", user) + } + +Example to Create a User + + projectID := "a99e9b4e620e4db09a2dfb6e42a01e66" + + createOpts := users.CreateOpts{ + Name: "username", + DomainID: "default", + DefaultProjectID: projectID, + Enabled: gophercloud.Enabled, + Password: "supersecret", + Extra: map[string]interface{}{ + "email": "username@example.com", + } + } + + user, err := users.Create(identityClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a User + + userID := "0fe36e73809d46aeae6705c39077b1b3" + + updateOpts := users.UpdateOpts{ + Enabled: gophercloud.Disabled, + } + + user, err := users.Update(identityClient, userID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Change Password of a User + + userID := "0fe36e73809d46aeae6705c39077b1b3" + originalPassword := "secretsecret" + password := "new_secretsecret" + + changePasswordOpts := users.ChangePasswordOpts{ + OriginalPassword: originalPassword, + Password: password, + } + + err := users.ChangePassword(identityClient, userID, changePasswordOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to Delete a User + + userID := "0fe36e73809d46aeae6705c39077b1b3" + err := users.Delete(identityClient, userID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List Groups a User Belongs To + + userID := "0fe36e73809d46aeae6705c39077b1b3" + + allPages, err := users.ListGroups(identityClient, userID).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := groups.ExtractGroups(allPages) + if err != nil { + panic(err) + } + + for _, group := range allGroups { + fmt.Printf("%+v\n", group) + } + +Example to Add a User to a Group + + groupID := "bede500ee1124ae9b0006ff859758b3a" + userID := "0fe36e73809d46aeae6705c39077b1b3" + err := users.AddToGroup(identityClient, groupID, userID).ExtractErr() + + if err != nil { + panic(err) + } + +Example to Check Whether a User Belongs to a Group + + groupID := "bede500ee1124ae9b0006ff859758b3a" + userID := "0fe36e73809d46aeae6705c39077b1b3" + ok, err := users.IsMemberOfGroup(identityClient, groupID, userID).Extract() + if err != nil { + panic(err) + } + + if ok { + fmt.Printf("user %s is a member of group %s\n", userID, groupID) + } + +Example to Remove a User from a Group + + groupID := "bede500ee1124ae9b0006ff859758b3a" + userID := "0fe36e73809d46aeae6705c39077b1b3" + err := users.RemoveFromGroup(identityClient, groupID, userID).ExtractErr() + + if err != nil { + panic(err) + } + +Example to List Projects a User Belongs To + + userID := "0fe36e73809d46aeae6705c39077b1b3" + + allPages, err := users.ListProjects(identityClient, userID).AllPages() + if err != nil { + panic(err) + } + + allProjects, err := projects.ExtractProjects(allPages) + if err != nil { + panic(err) + } 
+ + for _, project := range allProjects { + fmt.Printf("%+v\n", project) + } + +Example to List Users in a Group + + groupID := "bede500ee1124ae9b0006ff859758b3a" + listOpts := users.ListOpts{ + DomainID: "default", + } + + allPages, err := users.ListInGroup(identityClient, groupID, listOpts).AllPages() + if err != nil { + panic(err) + } + + allUsers, err := users.ExtractUsers(allPages) + if err != nil { + panic(err) + } + + for _, user := range allUsers { + fmt.Printf("%+v\n", user) + } + +*/ +package users diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/errors.go new file mode 100644 index 000000000000..0f0b79875497 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/errors.go @@ -0,0 +1,17 @@ +package users + +import "fmt" + +// InvalidListFilter is returned by the ToUserListQuery method when validation of +// a filter does not pass +type InvalidListFilter struct { + FilterName string +} + +func (e InvalidListFilter) Error() string { + s := fmt.Sprintf( + "Invalid filter name [%s]: it must be in format of NAME__COMPARATOR", + e.FilterName, + ) + return s +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go new file mode 100644 index 000000000000..ed3d1a026e96 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/requests.go @@ -0,0 +1,338 @@ +package users + +import ( + "net/http" + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/pagination" +) + +// Option is a specific option defined at the API to enable features +// on a user account. +type Option string + +const ( + IgnoreChangePasswordUponFirstUse Option = "ignore_change_password_upon_first_use" + IgnorePasswordExpiry Option = "ignore_password_expiry" + IgnoreLockoutFailureAttempts Option = "ignore_lockout_failure_attempts" + MultiFactorAuthRules Option = "multi_factor_auth_rules" + MultiFactorAuthEnabled Option = "multi_factor_auth_enabled" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToUserListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // DomainID filters the response by a domain ID. + DomainID string `q:"domain_id"` + + // Enabled filters the response by enabled users. + Enabled *bool `q:"enabled"` + + // IdpID filters the response by an Identity Provider ID. + IdPID string `q:"idp_id"` + + // Name filters the response by username. + Name string `q:"name"` + + // PasswordExpiresAt filters the response based on expiring passwords. + PasswordExpiresAt string `q:"password_expires_at"` + + // ProtocolID filters the response by protocol ID. + ProtocolID string `q:"protocol_id"` + + // UniqueID filters the response by unique ID. + UniqueID string `q:"unique_id"` + + // Filters filters the response by custom filters such as + // 'name__contains=foo' + Filters map[string]string `q:"-"` +} + +// ToUserListQuery formats a ListOpts into a query string. 
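+//
+// As an illustrative sketch (the values are made up), a custom Filters entry
+// must follow the NAME__COMPARATOR form described by InvalidListFilter:
+//
+//	opts := ListOpts{
+//		DomainID: "default",
+//		Filters:  map[string]string{"name__contains": "smith"},
+//	}
+//	query, err := opts.ToUserListQuery()
+//	// query == "?domain_id=default&name__contains=smith"
+//
+// Keys without a name and comparator separated by a double underscore are
+// rejected with InvalidListFilter.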
+func (opts ListOpts) ToUserListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + + params := q.Query() + for k, v := range opts.Filters { + i := strings.Index(k, "__") + if i > 0 && i < len(k)-2 { + params.Add(k, v) + } else { + return "", InvalidListFilter{FilterName: k} + } + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// List enumerates the Users to which the current token has access. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToUserListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return UserPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details on a single user, by ID. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToUserCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a user. +type CreateOpts struct { + // Name is the name of the new user. + Name string `json:"name" required:"true"` + + // DefaultProjectID is the ID of the default project of the user. + DefaultProjectID string `json:"default_project_id,omitempty"` + + // Description is a description of the user. + Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the user belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the user status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the user. + Extra map[string]interface{} `json:"-"` + + // Options are defined options in the API to enable certain features. + Options map[Option]interface{} `json:"options,omitempty"` + + // Password is the password of the new user. + Password string `json:"password,omitempty"` +} + +// ToUserCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToUserCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "user") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["user"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Create creates a new User. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToUserCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to +// the Update request. +type UpdateOptsBuilder interface { + ToUserUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts provides options for updating a user account. +type UpdateOpts struct { + // Name is the name of the new user. + Name string `json:"name,omitempty"` + + // DefaultProjectID is the ID of the default project of the user. + DefaultProjectID string `json:"default_project_id,omitempty"` + + // Description is a description of the user. 
+ Description string `json:"description,omitempty"` + + // DomainID is the ID of the domain the user belongs to. + DomainID string `json:"domain_id,omitempty"` + + // Enabled sets the user status to enabled or disabled. + Enabled *bool `json:"enabled,omitempty"` + + // Extra is free-form extra key/value pairs to describe the user. + Extra map[string]interface{} `json:"-"` + + // Options are defined options in the API to enable certain features. + Options map[Option]interface{} `json:"options,omitempty"` + + // Password is the password of the new user. + Password string `json:"password,omitempty"` +} + +// ToUserUpdateMap formats a UpdateOpts into an update request. +func (opts UpdateOpts) ToUserUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "user") + if err != nil { + return nil, err + } + + if opts.Extra != nil { + if v, ok := b["user"].(map[string]interface{}); ok { + for key, value := range opts.Extra { + v[key] = value + } + } + } + + return b, nil +} + +// Update updates an existing User. +func Update(client *gophercloud.ServiceClient, userID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToUserUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Patch(updateURL(client, userID), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ChangePasswordOptsBuilder allows extensions to add additional parameters to +// the ChangePassword request. +type ChangePasswordOptsBuilder interface { + ToUserChangePasswordMap() (map[string]interface{}, error) +} + +// ChangePasswordOpts provides options for changing password for a user. +type ChangePasswordOpts struct { + // OriginalPassword is the original password of the user. + OriginalPassword string `json:"original_password"` + + // Password is the new password of the user. + Password string `json:"password"` +} + +// ToUserChangePasswordMap formats a ChangePasswordOpts into a ChangePassword request. +func (opts ChangePasswordOpts) ToUserChangePasswordMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "user") + if err != nil { + return nil, err + } + + return b, nil +} + +// ChangePassword changes password for a user. +func ChangePassword(client *gophercloud.ServiceClient, userID string, opts ChangePasswordOptsBuilder) (r ChangePasswordResult) { + b, err := opts.ToUserChangePasswordMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(changePasswordURL(client, userID), &b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// Delete deletes a user. +func Delete(client *gophercloud.ServiceClient, userID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, userID), nil) + return +} + +// ListGroups enumerates groups user belongs to. +func ListGroups(client *gophercloud.ServiceClient, userID string) pagination.Pager { + url := listGroupsURL(client, userID) + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return groups.GroupPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}} + }) +} + +// AddToGroup adds a user to a group. +func AddToGroup(client *gophercloud.ServiceClient, groupID, userID string) (r AddToGroupResult) { + url := addToGroupURL(client, groupID, userID) + _, r.Err = client.Put(url, nil, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// IsMemberOfGroup checks whether a user belongs to a group. 
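+//
+// Membership is reported by the result's Extract method; a minimal sketch
+// (identityClient, groupID and userID are placeholders):
+//
+//	ok, err := users.IsMemberOfGroup(identityClient, groupID, userID).Extract()
+//	if err == nil && ok {
+//		// the user is a member of the group
+//	}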
+func IsMemberOfGroup(client *gophercloud.ServiceClient, groupID, userID string) (r IsMemberOfGroupResult) {
+	url := isMemberOfGroupURL(client, groupID, userID)
+	var response *http.Response
+	response, r.Err = client.Head(url, &gophercloud.RequestOpts{
+		OkCodes: []int{204, 404},
+	})
+	if r.Err == nil && response != nil {
+		if (*response).StatusCode == 204 {
+			r.isMember = true
+		}
+	}
+
+	return
+}
+
+// RemoveFromGroup removes a user from a group.
+func RemoveFromGroup(client *gophercloud.ServiceClient, groupID, userID string) (r RemoveFromGroupResult) {
+	url := removeFromGroupURL(client, groupID, userID)
+	_, r.Err = client.Delete(url, &gophercloud.RequestOpts{
+		OkCodes: []int{204},
+	})
+	return
+}
+
+// ListProjects enumerates the projects a user belongs to.
+func ListProjects(client *gophercloud.ServiceClient, userID string) pagination.Pager {
+	url := listProjectsURL(client, userID)
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return projects.ProjectPage{LinkedPageBase: pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// ListInGroup enumerates users that belong to a group.
+func ListInGroup(client *gophercloud.ServiceClient, groupID string, opts ListOptsBuilder) pagination.Pager {
+	url := listInGroupURL(client, groupID)
+	if opts != nil {
+		query, err := opts.ToUserListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return UserPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go
new file mode 100644
index 000000000000..e158ac723bb5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/results.go
@@ -0,0 +1,179 @@
+package users
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/internal"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// User represents a User in the OpenStack Identity Service.
+type User struct {
+	// DefaultProjectID is the ID of the default project of the user.
+	DefaultProjectID string `json:"default_project_id"`
+
+	// Description is the description of the user.
+	Description string `json:"description"`
+
+	// DomainID is the domain ID the user belongs to.
+	DomainID string `json:"domain_id"`
+
+	// Enabled is whether or not the user is enabled.
+	Enabled bool `json:"enabled"`
+
+	// Extra is a collection of miscellaneous key/values.
+	Extra map[string]interface{} `json:"-"`
+
+	// ID is the unique ID of the user.
+	ID string `json:"id"`
+
+	// Links contains referencing links to the user.
+	Links map[string]interface{} `json:"links"`
+
+	// Name is the name of the user.
+	Name string `json:"name"`
+
+	// Options are a set of defined options of the user.
+	Options map[string]interface{} `json:"options"`
+
+	// PasswordExpiresAt is the timestamp when the user's password expires.
+ PasswordExpiresAt time.Time `json:"-"` +} + +func (r *User) UnmarshalJSON(b []byte) error { + type tmp User + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + PasswordExpiresAt gophercloud.JSONRFC3339MilliNoZ `json:"password_expires_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = User(s.tmp) + + r.PasswordExpiresAt = time.Time(s.PasswordExpiresAt) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. + if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + delete(resultMap, "password_expires_at") + r.Extra = internal.RemainingKeys(User{}, resultMap) + } + } + + return err +} + +type userResult struct { + gophercloud.Result +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a User. +type GetResult struct { + userResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a User. +type CreateResult struct { + userResult +} + +// UpdateResult is the response from an Update operation. Call its Extract +// method to interpret it as a User. +type UpdateResult struct { + userResult +} + +// ChangePasswordResult is the response from a ChangePassword operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type ChangePasswordResult struct { + gophercloud.ErrResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AddToGroupResult is the response from a AddToGroup operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type AddToGroupResult struct { + gophercloud.ErrResult +} + +// IsMemberOfGroupResult is the response from a IsMemberOfGroup operation. Call its +// Extract method to determine if the request succeeded or failed. +type IsMemberOfGroupResult struct { + isMember bool + gophercloud.Result +} + +// RemoveFromGroupResult is the response from a RemoveFromGroup operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type RemoveFromGroupResult struct { + gophercloud.ErrResult +} + +// UserPage is a single page of User results. +type UserPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a UserPage contains any results. +func (r UserPage) IsEmpty() (bool, error) { + users, err := ExtractUsers(r) + return len(users) == 0, err +} + +// NextPageURL extracts the "next" link from the links section of the result. +func (r UserPage) NextPageURL() (string, error) { + var s struct { + Links struct { + Next string `json:"next"` + Previous string `json:"previous"` + } `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return s.Links.Next, err +} + +// ExtractUsers returns a slice of Users contained in a single page of results. +func ExtractUsers(r pagination.Page) ([]User, error) { + var s struct { + Users []User `json:"users"` + } + err := (r.(UserPage)).ExtractInto(&s) + return s.Users, err +} + +// Extract interprets any user results as a User. 
+func (r userResult) Extract() (*User, error) { + var s struct { + User *User `json:"user"` + } + err := r.ExtractInto(&s) + return s.User, err +} + +// Extract extracts IsMemberOfGroupResult as bool and error values +func (r IsMemberOfGroupResult) Extract() (bool, error) { + return r.isMember, r.Err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/testing/fixtures.go new file mode 100644 index 000000000000..d68e1ac2a41c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/testing/fixtures.go @@ -0,0 +1,532 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/users" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListOutput provides a single page of User results. +const ListOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://example.com/identity/v3/users" + }, + "users": [ + { + "domain_id": "default", + "enabled": true, + "id": "2844b2a08be147a08ef58317d6471f1f", + "links": { + "self": "http://example.com/identity/v3/users/2844b2a08be147a08ef58317d6471f1f" + }, + "name": "glance", + "password_expires_at": null, + "description": "some description", + "extra": { + "email": "glance@localhost" + } + }, + { + "default_project_id": "263fd9", + "domain_id": "1789d1", + "enabled": true, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/users/9fe1d3" + }, + "name": "jsmith", + "password_expires_at": "2016-11-06T15:32:17.000000", + "email": "jsmith@example.com", + "options": { + "ignore_password_expiry": true, + "multi_factor_auth_rules": [["password", "totp"], ["password", "custom-auth-method"]] + } + } + ] +} +` + +// GetOutput provides a Get result. +const GetOutput = ` +{ + "user": { + "default_project_id": "263fd9", + "domain_id": "1789d1", + "enabled": true, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/users/9fe1d3" + }, + "name": "jsmith", + "password_expires_at": "2016-11-06T15:32:17.000000", + "email": "jsmith@example.com", + "options": { + "ignore_password_expiry": true, + "multi_factor_auth_rules": [["password", "totp"], ["password", "custom-auth-method"]] + } + } +} +` + +// GetOutputNoOptions provides a Get result of a user with no options. +const GetOutputNoOptions = ` +{ + "user": { + "default_project_id": "263fd9", + "domain_id": "1789d1", + "enabled": true, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/users/9fe1d3" + }, + "name": "jsmith", + "password_expires_at": "2016-11-06T15:32:17.000000", + "email": "jsmith@example.com" + } +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "user": { + "default_project_id": "263fd9", + "domain_id": "1789d1", + "enabled": true, + "name": "jsmith", + "password": "secretsecret", + "email": "jsmith@example.com", + "options": { + "ignore_password_expiry": true, + "multi_factor_auth_rules": [["password", "totp"], ["password", "custom-auth-method"]] + } + } +} +` + +// CreateNoOptionsRequest provides the input to a Create request with no Options. 
+const CreateNoOptionsRequest = ` +{ + "user": { + "default_project_id": "263fd9", + "domain_id": "1789d1", + "enabled": true, + "name": "jsmith", + "password": "secretsecret", + "email": "jsmith@example.com" + } +} +` + +// UpdateRequest provides the input to an Update request. +const UpdateRequest = ` +{ + "user": { + "enabled": false, + "disabled_reason": "DDOS", + "options": { + "multi_factor_auth_rules": null + } + } +} +` + +// UpdateOutput provides an update result. +const UpdateOutput = ` +{ + "user": { + "default_project_id": "263fd9", + "domain_id": "1789d1", + "enabled": false, + "id": "9fe1d3", + "links": { + "self": "https://example.com/identity/v3/users/9fe1d3" + }, + "name": "jsmith", + "password_expires_at": "2016-11-06T15:32:17.000000", + "email": "jsmith@example.com", + "disabled_reason": "DDOS", + "options": { + "ignore_password_expiry": true + } + } +} +` + +// ChangePasswordRequest provides the input to a ChangePassword request. +const ChangePasswordRequest = ` +{ + "user": { + "password": "new_secretsecret", + "original_password": "secretsecret" + } +} +` + +// ListGroupsOutput provides a ListGroups result. +const ListGroupsOutput = ` +{ + "groups": [ + { + "description": "Developers cleared for work on all general projects", + "domain_id": "1789d1", + "id": "ea167b", + "links": { + "self": "https://example.com/identity/v3/groups/ea167b" + }, + "building": "Hilltop A", + "name": "Developers" + }, + { + "description": "Developers cleared for work on secret projects", + "domain_id": "1789d1", + "id": "a62db1", + "links": { + "self": "https://example.com/identity/v3/groups/a62db1" + }, + "name": "Secure Developers" + } + ], + "links": { + "self": "http://example.com/identity/v3/users/9fe1d3/groups", + "previous": null, + "next": null + } +} +` + +// ListProjectsOutput provides a ListProjects result. +const ListProjectsOutput = ` +{ + "links": { + "next": null, + "previous": null, + "self": "http://localhost:5000/identity/v3/users/foobar/projects" + }, + "projects": [ + { + "description": "my first project", + "domain_id": "11111", + "enabled": true, + "id": "abcde", + "links": { + "self": "http://localhost:5000/identity/v3/projects/abcde" + }, + "name": "project 1", + "parent_id": "11111" + }, + { + "description": "my second project", + "domain_id": "22222", + "enabled": true, + "id": "bcdef", + "links": { + "self": "http://localhost:5000/identity/v3/projects/bcdef" + }, + "name": "project 2", + "parent_id": "22222" + } + ] +} +` + +// FirstUser is the first user in the List request. +var nilTime time.Time +var FirstUser = users.User{ + DomainID: "default", + Enabled: true, + ID: "2844b2a08be147a08ef58317d6471f1f", + Links: map[string]interface{}{ + "self": "http://example.com/identity/v3/users/2844b2a08be147a08ef58317d6471f1f", + }, + Name: "glance", + PasswordExpiresAt: nilTime, + Description: "some description", + Extra: map[string]interface{}{ + "email": "glance@localhost", + }, +} + +// SecondUser is the second user in the List request. 
+var SecondUserPasswordExpiresAt, _ = time.Parse(gophercloud.RFC3339MilliNoZ, "2016-11-06T15:32:17.000000") +var SecondUser = users.User{ + DefaultProjectID: "263fd9", + DomainID: "1789d1", + Enabled: true, + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/users/9fe1d3", + }, + Name: "jsmith", + PasswordExpiresAt: SecondUserPasswordExpiresAt, + Extra: map[string]interface{}{ + "email": "jsmith@example.com", + }, + Options: map[string]interface{}{ + "ignore_password_expiry": true, + "multi_factor_auth_rules": []interface{}{ + []string{"password", "totp"}, + []string{"password", "custom-auth-method"}, + }, + }, +} + +var SecondUserNoOptions = users.User{ + DefaultProjectID: "263fd9", + DomainID: "1789d1", + Enabled: true, + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/users/9fe1d3", + }, + Name: "jsmith", + PasswordExpiresAt: SecondUserPasswordExpiresAt, + Extra: map[string]interface{}{ + "email": "jsmith@example.com", + }, +} + +// SecondUserUpdated is how SecondUser should look after an Update. +var SecondUserUpdated = users.User{ + DefaultProjectID: "263fd9", + DomainID: "1789d1", + Enabled: false, + ID: "9fe1d3", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/users/9fe1d3", + }, + Name: "jsmith", + PasswordExpiresAt: SecondUserPasswordExpiresAt, + Extra: map[string]interface{}{ + "email": "jsmith@example.com", + "disabled_reason": "DDOS", + }, + Options: map[string]interface{}{ + "ignore_password_expiry": true, + }, +} + +// ExpectedUsersSlice is the slice of users expected to be returned from ListOutput. +var ExpectedUsersSlice = []users.User{FirstUser, SecondUser} + +var FirstGroup = groups.Group{ + Description: "Developers cleared for work on all general projects", + DomainID: "1789d1", + ID: "ea167b", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/groups/ea167b", + }, + Extra: map[string]interface{}{ + "building": "Hilltop A", + }, + Name: "Developers", +} + +var SecondGroup = groups.Group{ + Description: "Developers cleared for work on secret projects", + DomainID: "1789d1", + ID: "a62db1", + Links: map[string]interface{}{ + "self": "https://example.com/identity/v3/groups/a62db1", + }, + Extra: map[string]interface{}{}, + Name: "Secure Developers", +} + +var ExpectedGroupsSlice = []groups.Group{FirstGroup, SecondGroup} + +var FirstProject = projects.Project{ + Description: "my first project", + DomainID: "11111", + Enabled: true, + ID: "abcde", + Name: "project 1", + ParentID: "11111", +} + +var SecondProject = projects.Project{ + Description: "my second project", + DomainID: "22222", + Enabled: true, + ID: "bcdef", + Name: "project 2", + ParentID: "22222", +} + +var ExpectedProjectsSlice = []projects.Project{FirstProject, SecondProject} + +// HandleListUsersSuccessfully creates an HTTP handler at `/users` on the +// test handler mux that responds with a list of two users. +func HandleListUsersSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} + +// HandleGetUserSuccessfully creates an HTTP handler at `/users` on the +// test handler mux that responds with a single user. 
+func HandleGetUserSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users/9fe1d3", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "Accept", "application/json")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, GetOutput)
+	})
+}
+
+// HandleCreateUserSuccessfully creates an HTTP handler at `/users` on the
+// test handler mux that tests user creation.
+func HandleCreateUserSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, CreateRequest)
+
+		w.WriteHeader(http.StatusCreated)
+		fmt.Fprintf(w, GetOutput)
+	})
+}
+
+// HandleCreateNoOptionsUserSuccessfully creates an HTTP handler at `/users` on the
+// test handler mux that tests user creation.
+func HandleCreateNoOptionsUserSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, CreateNoOptionsRequest)
+
+		w.WriteHeader(http.StatusCreated)
+		fmt.Fprintf(w, GetOutputNoOptions)
+	})
+}
+
+// HandleUpdateUserSuccessfully creates an HTTP handler at `/users` on the
+// test handler mux that tests user update.
+func HandleUpdateUserSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users/9fe1d3", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "PATCH")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, UpdateRequest)
+
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, UpdateOutput)
+	})
+}
+
+// HandleChangeUserPasswordSuccessfully creates an HTTP handler at `/users` on the
+// test handler mux that tests change user password.
+func HandleChangeUserPasswordSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users/9fe1d3/password", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "POST")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+		th.TestJSONRequest(t, r, ChangePasswordRequest)
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
+
+// HandleDeleteUserSuccessfully creates an HTTP handler at `/users` on the
+// test handler mux that tests user deletion.
+func HandleDeleteUserSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users/9fe1d3", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "DELETE")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.WriteHeader(http.StatusNoContent)
+	})
+}
+
+// HandleListUserGroupsSuccessfully creates an HTTP handler at /users/{userID}/groups
+// on the test handler mux that responds with a list of two groups.
+func HandleListUserGroupsSuccessfully(t *testing.T) {
+	th.Mux.HandleFunc("/users/9fe1d3/groups", func(w http.ResponseWriter, r *http.Request) {
+		th.TestMethod(t, r, "GET")
+		th.TestHeader(t, r, "Accept", "application/json")
+		th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+		w.Header().Set("Content-Type", "application/json")
+		w.WriteHeader(http.StatusOK)
+		fmt.Fprintf(w, ListGroupsOutput)
+	})
+}
+
+// HandleAddToGroupSuccessfully creates an HTTP handler at /groups/{groupID}/users/{userID}
+// on the test handler mux that tests adding user to group.
+func HandleAddToGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/ea167b/users/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleIsMemberOfGroupSuccessfully creates an HTTP handler at /groups/{groupID}/users/{userID} +// on the test handler mux that tests checking whether user belongs to group. +func HandleIsMemberOfGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/ea167b/users/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleRemoveFromGroupSuccessfully creates an HTTP handler at /groups/{groupID}/users/{userID} +// on the test handler mux that tests removing user from group. +func HandleRemoveFromGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/ea167b/users/9fe1d3", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleListUserProjectsSuccessfully creates an HTTP handler at /users/{userID}/projects +// on the test handler mux that respons wit a list of two projects +func HandleListUserProjectsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/users/9fe1d3/projects", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListProjectsOutput) + }) +} + +// HandleListInGroupSuccessfully creates an HTTP handler at /groups/{groupID}/users +// on the test handler mux that response with a list of two users +func HandleListInGroupSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/groups/ea167b/users", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListOutput) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/testing/requests_test.go new file mode 100644 index 000000000000..3eb1b46f5b3f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/testing/requests_test.go @@ -0,0 +1,248 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/identity/v3/groups" + "github.com/gophercloud/gophercloud/openstack/identity/v3/projects" + "github.com/gophercloud/gophercloud/openstack/identity/v3/users" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListUsers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListUsersSuccessfully(t) + + count := 0 + err := users.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := users.ExtractUsers(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedUsersSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + 
th.CheckEquals(t, count, 1) +} + +func TestListUsersAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListUsersSuccessfully(t) + + allPages, err := users.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedUsersSlice, actual) + th.AssertEquals(t, ExpectedUsersSlice[0].Extra["email"], "glance@localhost") + th.AssertEquals(t, ExpectedUsersSlice[1].Extra["email"], "jsmith@example.com") +} + +func TestListUsersFiltersCheck(t *testing.T) { + type test struct { + filterName string + wantErr bool + } + tests := []test{ + {"foo__contains", false}, + {"foo", true}, + {"foo_contains", true}, + {"foo__", true}, + {"__foo", true}, + } + + var listOpts users.ListOpts + for _, _test := range tests { + listOpts.Filters = map[string]string{_test.filterName: "bar"} + _, err := listOpts.ToUserListQuery() + + if !_test.wantErr { + th.AssertNoErr(t, err) + } else { + switch _t := err.(type) { + case nil: + t.Fatal("error expected but got a nil") + case users.InvalidListFilter: + default: + t.Fatalf("unexpected error type: [%T]", _t) + } + } + } +} + +func TestGetUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetUserSuccessfully(t) + + actual, err := users.Get(client.ServiceClient(), "9fe1d3").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondUser, *actual) + th.AssertEquals(t, SecondUser.Extra["email"], "jsmith@example.com") +} + +func TestCreateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateUserSuccessfully(t) + + iTrue := true + createOpts := users.CreateOpts{ + Name: "jsmith", + DomainID: "1789d1", + Enabled: &iTrue, + Password: "secretsecret", + DefaultProjectID: "263fd9", + Options: map[users.Option]interface{}{ + users.IgnorePasswordExpiry: true, + users.MultiFactorAuthRules: []interface{}{ + []string{"password", "totp"}, + []string{"password", "custom-auth-method"}, + }, + }, + Extra: map[string]interface{}{ + "email": "jsmith@example.com", + }, + } + + actual, err := users.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondUser, *actual) +} + +func TestCreateNoOptionsUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateNoOptionsUserSuccessfully(t) + + iTrue := true + createOpts := users.CreateOpts{ + Name: "jsmith", + DomainID: "1789d1", + Enabled: &iTrue, + Password: "secretsecret", + DefaultProjectID: "263fd9", + Extra: map[string]interface{}{ + "email": "jsmith@example.com", + }, + } + + actual, err := users.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondUserNoOptions, *actual) +} + +func TestUpdateUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateUserSuccessfully(t) + + iFalse := false + updateOpts := users.UpdateOpts{ + Enabled: &iFalse, + Options: map[users.Option]interface{}{ + users.MultiFactorAuthRules: nil, + }, + Extra: map[string]interface{}{ + "disabled_reason": "DDOS", + }, + } + + actual, err := users.Update(client.ServiceClient(), "9fe1d3", updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, SecondUserUpdated, *actual) +} + +func TestChangeUserPassword(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleChangeUserPasswordSuccessfully(t) + + changePasswordOpts := users.ChangePasswordOpts{ + OriginalPassword: "secretsecret", + Password: "new_secretsecret", + } + + res := 
users.ChangePassword(client.ServiceClient(), "9fe1d3", changePasswordOpts) + th.AssertNoErr(t, res.Err) +} + +func TestDeleteUser(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteUserSuccessfully(t) + + res := users.Delete(client.ServiceClient(), "9fe1d3") + th.AssertNoErr(t, res.Err) +} + +func TestListUserGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListUserGroupsSuccessfully(t) + allPages, err := users.ListGroups(client.ServiceClient(), "9fe1d3").AllPages() + th.AssertNoErr(t, err) + actual, err := groups.ExtractGroups(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedGroupsSlice, actual) +} + +func TestAddToGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAddToGroupSuccessfully(t) + res := users.AddToGroup(client.ServiceClient(), "ea167b", "9fe1d3") + th.AssertNoErr(t, res.Err) +} + +func TestIsMemberOfGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleIsMemberOfGroupSuccessfully(t) + ok, err := users.IsMemberOfGroup(client.ServiceClient(), "ea167b", "9fe1d3").Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, true, ok) +} + +func TestRemoveFromGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRemoveFromGroupSuccessfully(t) + res := users.RemoveFromGroup(client.ServiceClient(), "ea167b", "9fe1d3") + th.AssertNoErr(t, res.Err) +} + +func TestListUserProjects(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListUserProjectsSuccessfully(t) + allPages, err := users.ListProjects(client.ServiceClient(), "9fe1d3").AllPages() + th.AssertNoErr(t, err) + actual, err := projects.ExtractProjects(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedProjectsSlice, actual) +} + +func TestListInGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListInGroupSuccessfully(t) + + iTrue := true + listOpts := users.ListOpts{ + Enabled: &iTrue, + } + + allPages, err := users.ListInGroup(client.ServiceClient(), "ea167b", listOpts).AllPages() + th.AssertNoErr(t, err) + actual, err := users.ExtractUsers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedUsersSlice, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go new file mode 100644 index 000000000000..3caa8bbb6ca9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/identity/v3/users/urls.go @@ -0,0 +1,51 @@ +package users + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("users") +} + +func getURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("users") +} + +func updateURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func changePasswordURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "password") +} + +func deleteURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID) +} + +func listGroupsURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "groups") +} + +func addToGroupURL(client *gophercloud.ServiceClient, groupID, userID string) string { + return 
client.ServiceURL("groups", groupID, "users", userID) +} + +func isMemberOfGroupURL(client *gophercloud.ServiceClient, groupID, userID string) string { + return client.ServiceURL("groups", groupID, "users", userID) +} + +func removeFromGroupURL(client *gophercloud.ServiceClient, groupID, userID string) string { + return client.ServiceURL("groups", groupID, "users", userID) +} + +func listProjectsURL(client *gophercloud.ServiceClient, userID string) string { + return client.ServiceURL("users", userID, "projects") +} + +func listInGroupURL(client *gophercloud.ServiceClient, groupID string) string { + return client.ServiceURL("groups", groupID, "users") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/README.md b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/README.md new file mode 100644 index 000000000000..05c19befe8ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/README.md @@ -0,0 +1 @@ +This provides a Go API which wraps any service implementing the [OpenStack Image Service API, version 2](http://developer.openstack.org/api-ref-image-v2.html). diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go new file mode 100644 index 000000000000..0c12bf2e0779 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/doc.go @@ -0,0 +1,48 @@ +/* +Package imagedata enables management of image data. + +Example to Upload Image Data + + imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" + + imageData, err := os.Open("/path/to/image/file") + if err != nil { + panic(err) + } + defer imageData.Close() + + err = imagedata.Upload(imageClient, imageID, imageData).ExtractErr() + if err != nil { + panic(err) + } + +Example to Stage Image Data + + imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" + + imageData, err := os.Open("/path/to/image/file") + if err != nil { + panic(err) + } + defer imageData.Close() + + err = imagedata.Stage(imageClient, imageID, imageData).ExtractErr() + if err != nil { + panic(err) + } + +Example to Download Image Data + + imageID := "da3b75d9-3f4a-40e7-8a2c-bfab23927dea" + + image, err := imagedata.Download(imageClient, imageID).Extract() + if err != nil { + panic(err) + } + + imageData, err := ioutil.ReadAll(image) + if err != nil { + panic(err) + } +*/ +package imagedata diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go new file mode 100644 index 000000000000..545c561f366c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/requests.go @@ -0,0 +1,39 @@ +package imagedata + +import ( + "io" + "net/http" + + "github.com/gophercloud/gophercloud" +) + +// Upload uploads an image file. +func Upload(client *gophercloud.ServiceClient, id string, data io.Reader) (r UploadResult) { + _, r.Err = client.Put(uploadURL(client, id), data, nil, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"Content-Type": "application/octet-stream"}, + OkCodes: []int{204}, + }) + return +} + +// Stage performs PUT call on the existing image object in the Imageservice with +// the provided file. +// Existing image object must be in the "queued" status. 
+func Stage(client *gophercloud.ServiceClient, id string, data io.Reader) (r StageResult) { + _, r.Err = client.Put(stageURL(client, id), data, nil, &gophercloud.RequestOpts{ + MoreHeaders: map[string]string{"Content-Type": "application/octet-stream"}, + OkCodes: []int{204}, + }) + return +} + +// Download retrieves an image. +func Download(client *gophercloud.ServiceClient, id string) (r DownloadResult) { + var resp *http.Response + resp, r.Err = client.Get(downloadURL(client, id), nil, nil) + if resp != nil { + r.Body = resp.Body + r.Header = resp.Header + } + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go new file mode 100644 index 000000000000..17a3f0e0f7e2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/results.go @@ -0,0 +1,34 @@ +package imagedata + +import ( + "fmt" + "io" + + "github.com/gophercloud/gophercloud" +) + +// UploadResult is the result of an upload image operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type UploadResult struct { + gophercloud.ErrResult +} + +// StageResult is the result of a stage image operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type StageResult struct { + gophercloud.ErrResult +} + +// DownloadResult is the result of a download image operation. Call its Extract +// method to gain access to the image data. +type DownloadResult struct { + gophercloud.Result +} + +// Extract builds images model from io.Reader +func (r DownloadResult) Extract() (io.Reader, error) { + if r, ok := r.Body.(io.Reader); ok { + return r, nil + } + return nil, fmt.Errorf("Expected io.Reader but got: %T(%#v)", r.Body, r.Body) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/doc.go new file mode 100644 index 000000000000..5a9db1bef38a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/doc.go @@ -0,0 +1,2 @@ +// imagedata unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/fixtures.go new file mode 100644 index 000000000000..64c44bdf6fc9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/fixtures.go @@ -0,0 +1,57 @@ +package testing + +import ( + "io/ioutil" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandlePutImageDataSuccessfully setup +func HandlePutImageDataSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/file", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Unable to read request body: %v", err) + } + + th.AssertByteArrayEquals(t, []byte{5, 3, 7, 24}, b) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleStageImageDataSuccessfully setup +func HandleStageImageDataSuccessfully(t *testing.T) { + 
th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/stage", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Unable to read request body: %v", err) + } + + th.AssertByteArrayEquals(t, []byte{5, 3, 7, 24}, b) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleGetImageDataSuccessfully setup +func HandleGetImageDataSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/file", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusOK) + + _, err := w.Write([]byte{34, 87, 0, 23, 23, 23, 56, 255, 254, 0}) + th.AssertNoErr(t, err) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/requests_test.go new file mode 100644 index 000000000000..7155f61b6353 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/testing/requests_test.go @@ -0,0 +1,101 @@ +package testing + +import ( + "fmt" + "io" + "io/ioutil" + "testing" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestUpload(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandlePutImageDataSuccessfully(t) + + err := imagedata.Upload( + fakeclient.ServiceClient(), + "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + readSeekerOfBytes([]byte{5, 3, 7, 24})).ExtractErr() + + th.AssertNoErr(t, err) +} + +func TestStage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleStageImageDataSuccessfully(t) + + err := imagedata.Stage( + fakeclient.ServiceClient(), + "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + readSeekerOfBytes([]byte{5, 3, 7, 24})).ExtractErr() + + th.AssertNoErr(t, err) +} + +func readSeekerOfBytes(bs []byte) io.ReadSeeker { + return &RS{bs: bs} +} + +// implements io.ReadSeeker +type RS struct { + bs []byte + offset int +} + +func (rs *RS) Read(p []byte) (int, error) { + leftToRead := len(rs.bs) - rs.offset + + if 0 < leftToRead { + bytesToWrite := min(leftToRead, len(p)) + for i := 0; i < bytesToWrite; i++ { + p[i] = rs.bs[rs.offset] + rs.offset++ + } + return bytesToWrite, nil + } + return 0, io.EOF +} + +func min(a int, b int) int { + if a < b { + return a + } + return b +} + +func (rs *RS) Seek(offset int64, whence int) (int64, error) { + var offsetInt = int(offset) + if whence == 0 { + rs.offset = offsetInt + } else if whence == 1 { + rs.offset = rs.offset + offsetInt + } else if whence == 2 { + rs.offset = len(rs.bs) - offsetInt + } else { + return 0, fmt.Errorf("For parameter `whence`, expected value in {0,1,2} but got: %#v", whence) + } + + return int64(rs.offset), nil +} + +func TestDownload(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGetImageDataSuccessfully(t) + + rdr, err := imagedata.Download(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea").Extract() + th.AssertNoErr(t, err) + + bs, err := ioutil.ReadAll(rdr) + th.AssertNoErr(t, err) + + th.AssertByteArrayEquals(t, []byte{34, 87, 0, 23, 23, 23, 56, 255, 254, 0}, bs) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go new file mode 100644 index 000000000000..d9615ba7fb8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imagedata/urls.go @@ -0,0 +1,23 @@ +package imagedata + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "images" + uploadPath = "file" + stagePath = "stage" +) + +// `imageDataURL(c,i)` is the URL for the binary image data for the +// image identified by ID `i` in the service `c`. +func uploadURL(c *gophercloud.ServiceClient, imageID string) string { + return c.ServiceURL(rootPath, imageID, uploadPath) +} + +func stageURL(c *gophercloud.ServiceClient, imageID string) string { + return c.ServiceURL(rootPath, imageID, stagePath) +} + +func downloadURL(c *gophercloud.ServiceClient, imageID string) string { + return uploadURL(c, imageID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/doc.go new file mode 100644 index 000000000000..6862f3172ce4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/doc.go @@ -0,0 +1,14 @@ +/* +Package imageimport enables management of images import and retrieval of the +Imageservice Import API information. + +Example to Get an information about the Import API + + importInfo, err := imageimport.Get(imagesClient).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", importInfo) +*/ +package imageimport diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/requests.go new file mode 100644 index 000000000000..b374b0e47cfc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/requests.go @@ -0,0 +1,20 @@ +package imageimport + +import "github.com/gophercloud/gophercloud" + +// ImportMethod represents valid Import API method. +type ImportMethod string + +const ( + // GlanceDirectMethod represents glance-direct Import API method. + GlanceDirectMethod ImportMethod = "glance-direct" + + // WebDownloadMethod represents web-download Import API method. + WebDownloadMethod ImportMethod = "web-download" +) + +// Get retrieves Import API information data. +func Get(c *gophercloud.ServiceClient) (r GetResult) { + _, r.Err = c.Get(infoURL(c), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/results.go new file mode 100644 index 000000000000..c579c7b15650 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/results.go @@ -0,0 +1,32 @@ +package imageimport + +import "github.com/gophercloud/gophercloud" + +type commonResult struct { + gophercloud.Result +} + +// GetResult represents the result of a get operation. Call its Extract method +// to interpret it as ImportInfo. +type GetResult struct { + commonResult +} + +// ImportInfo represents information data for the Import API. +type ImportInfo struct { + ImportMethods ImportMethods `json:"import-methods"` +} + +// ImportMethods contains information about available Import API methods. 
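+//
+// As a rough illustration, decoding the ImportGetResult fixture used in this
+// package's tests yields approximately:
+//
+//	ImportMethods{
+//		Description: "Import methods available.",
+//		Type:        "array",
+//		Value:       []string{"glance-direct", "web-download"},
+//	}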
+type ImportMethods struct { + Description string `json:"description"` + Type string `json:"type"` + Value []string `json:"value"` +} + +// Extract is a function that accepts a result and extracts ImportInfo. +func (r commonResult) Extract() (*ImportInfo, error) { + var s *ImportInfo + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/doc.go new file mode 100644 index 000000000000..7603f836a002 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/doc.go @@ -0,0 +1 @@ +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/fixtures.go new file mode 100644 index 000000000000..7cef5cff0668 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/fixtures.go @@ -0,0 +1,15 @@ +package testing + +// ImportGetResult represents raw server response on a Get request. +const ImportGetResult = ` +{ + "import-methods": { + "description": "Import methods available.", + "type": "array", + "value": [ + "glance-direct", + "web-download" + ] + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/requests_test.go new file mode 100644 index 000000000000..01434bd622fd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/testing/requests_test.go @@ -0,0 +1,38 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/info/import", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ImportGetResult) + }) + + validImportMethods := []string{ + string(imageimport.GlanceDirectMethod), + string(imageimport.WebDownloadMethod), + } + + s, err := imageimport.Get(fakeclient.ServiceClient()).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.ImportMethods.Description, "Import methods available.") + th.AssertEquals(t, s.ImportMethods.Type, "array") + th.AssertDeepEquals(t, s.ImportMethods.Value, validImportMethods) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/urls.go new file mode 100644 index 000000000000..6ec37669b31f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/imageimport/urls.go @@ -0,0 +1,12 @@ +package imageimport + +import "github.com/gophercloud/gophercloud" + +const ( + infoPath = "info" + resourcePath = "import" +) + +func infoURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(infoPath, resourcePath) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
new file mode 100644
index 000000000000..14da9ac90da9
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/doc.go
@@ -0,0 +1,60 @@
+/*
+Package images enables management and retrieval of images from the OpenStack
+Image Service.
+
+Example to List Images
+
+	listOpts := images.ListOpts{
+		Owner: "a7509e1ae65945fda83f3e52c6296017",
+	}
+
+	allPages, err := images.List(imagesClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allImages, err := images.ExtractImages(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, image := range allImages {
+		fmt.Printf("%+v\n", image)
+	}
+
+Example to Create an Image
+
+	createOpts := images.CreateOpts{
+		Name:       "image_name",
+		Visibility: images.ImageVisibilityPrivate,
+	}
+
+	image, err := images.Create(imageClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+
+	updateOpts := images.UpdateOpts{
+		images.ReplaceImageName{
+			NewName: "new_name",
+		},
+	}
+
+	image, err := images.Update(imageClient, imageID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an Image
+
+	imageID := "1bea47ed-f6a9-463b-b423-14b9cca9ad27"
+	err := images.Delete(imageClient, imageID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package images
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
new file mode 100644
index 000000000000..16290d395a1b
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/requests.go
@@ -0,0 +1,352 @@
+package images
+
+import (
+	"fmt"
+	"net/url"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToImageListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the server attributes you want to see returned. Marker and Limit are used
+// for pagination.
+//
+// http://developer.openstack.org/api-ref-image-v2.html
+type ListOpts struct {
+	// ID is the ID of the image.
+	// Multiple IDs can be specified by constructing a string
+	// such as "in:uuid1,uuid2,uuid3".
+	ID string `q:"id"`
+
+	// Integer value for the limit of values to return.
+	Limit int `q:"limit"`
+
+	// UUID of the server at which you want to set a marker.
+	Marker string `q:"marker"`
+
+	// Name filters on the name of the image.
+	// Multiple names can be specified by constructing a string
+	// such as "in:name1,name2,name3".
+	Name string `q:"name"`
+
+	// Visibility filters on the visibility of the image.
+	Visibility ImageVisibility `q:"visibility"`
+
+	// MemberStatus filters on the member status of the image.
+	MemberStatus ImageMemberStatus `q:"member_status"`
+
+	// Owner filters on the project ID of the image.
+	Owner string `q:"owner"`
+
+	// Status filters on the status of the image.
+	// Multiple statuses can be specified by constructing a string
+	// such as "in:saving,queued".
+	Status ImageStatus `q:"status"`
+
+	// SizeMin filters on the size_min image property.
+	SizeMin int64 `q:"size_min"`
+
+	// SizeMax filters on the size_max image property.
+	SizeMax int64 `q:"size_max"`
+
+	// Sort sorts the results using the new style of sorting. See the OpenStack
+	// Image API reference for the exact syntax.
+	//
+	// Sort cannot be used with the classic sort options (sort_key and sort_dir).
+	Sort string `q:"sort"`
+
+	// SortKey will sort the results based on a specified image property.
+	SortKey string `q:"sort_key"`
+
+	// SortDir will sort the list results either ascending or descending.
+	SortDir string `q:"sort_dir"`
+
+	// Tags filters on specific image tags.
+	Tags []string `q:"tag"`
+
+	// CreatedAtQuery filters images based on their creation date.
+	CreatedAtQuery *ImageDateQuery
+
+	// UpdatedAtQuery filters images based on their updated date.
+	UpdatedAtQuery *ImageDateQuery
+
+	// ContainerFormat filters images based on the container_format.
+	// Multiple container formats can be specified by constructing a
+	// string such as "in:bare,ami".
+	ContainerFormat string `q:"container_format"`
+
+	// DiskFormat filters images based on the disk_format.
+	// Multiple disk formats can be specified by constructing a string
+	// such as "in:qcow2,iso".
+	DiskFormat string `q:"disk_format"`
+}
+
+// ToImageListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToImageListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	params := q.Query()
+
+	if opts.CreatedAtQuery != nil {
+		createdAt := opts.CreatedAtQuery.Date.Format(time.RFC3339)
+		if v := opts.CreatedAtQuery.Filter; v != "" {
+			createdAt = fmt.Sprintf("%s:%s", v, createdAt)
+		}
+
+		params.Add("created_at", createdAt)
+	}
+
+	if opts.UpdatedAtQuery != nil {
+		updatedAt := opts.UpdatedAtQuery.Date.Format(time.RFC3339)
+		if v := opts.UpdatedAtQuery.Filter; v != "" {
+			updatedAt = fmt.Sprintf("%s:%s", v, updatedAt)
+		}
+
+		params.Add("updated_at", updatedAt)
+	}
+
+	q = &url.URL{RawQuery: params.Encode()}
+
+	return q.String(), err
+}
+
+// List implements image list request.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(c)
+	if opts != nil {
+		query, err := opts.ToImageListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		imagePage := ImagePage{
+			serviceURL:     c.ServiceURL(),
+			LinkedPageBase: pagination.LinkedPageBase{PageResult: r},
+		}
+
+		return imagePage
+	})
+}
+
+// CreateOptsBuilder allows extensions to add parameters to the Create request.
+type CreateOptsBuilder interface {
+	// Returns value that can be passed to json.Marshal
+	ToImageCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents options used to create an image.
+type CreateOpts struct {
+	// Name is the name of the new image.
+	Name string `json:"name" required:"true"`
+
+	// ID is the image ID.
+	ID string `json:"id,omitempty"`
+
+	// Visibility defines who can see/use the image.
+	Visibility *ImageVisibility `json:"visibility,omitempty"`
+
+	// Tags is a set of image tags.
+	Tags []string `json:"tags,omitempty"`
+
+	// ContainerFormat is the format of the
+	// container. Valid values are ami, ari, aki, bare, and ovf.
+	ContainerFormat string `json:"container_format,omitempty"`
+
+	// DiskFormat is the format of the disk. If set,
+	// valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
+	DiskFormat string `json:"disk_format,omitempty"`
+
+	// MinDisk is the amount of disk space in
+	// GB that is required to boot the image.
+	MinDisk int `json:"min_disk,omitempty"`
+
+	// MinRAM is the amount of RAM in MB that
+	// is required to boot the image.
+	MinRAM int `json:"min_ram,omitempty"`
+
+	// Protected is whether the image is protected from deletion.
+	Protected *bool `json:"protected,omitempty"`
+
+	// Properties is a set of properties, if any, that
+	// are associated with the image.
+	Properties map[string]string `json:"-"`
+}
+
+// ToImageCreateMap assembles a request body based on the contents of
+// a CreateOpts.
+func (opts CreateOpts) ToImageCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Properties != nil {
+		for k, v := range opts.Properties {
+			b[k] = v
+		}
+	}
+	return b, nil
+}
+
+// Create implements create image request.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToImageCreateMap()
+	if err != nil {
+		r.Err = err
+		return r
+	}
+	_, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{OkCodes: []int{201}})
+	return
+}
+
+// Delete implements image delete request.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// Get implements image get request.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// Update implements the image update request.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToImageUpdateMap()
+	if err != nil {
+		r.Err = err
+		return r
+	}
+	_, r.Err = client.Patch(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes:     []int{200},
+		MoreHeaders: map[string]string{"Content-Type": "application/openstack-images-v2.1-json-patch"},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	// returns value implementing json.Marshaler which when marshaled matches
+	// the patch schema:
+	// http://specs.openstack.org/openstack/glance-specs/specs/api/v2/http-patch-image-api-v2.html
+	ToImageUpdateMap() ([]interface{}, error)
+}
+
+// UpdateOpts is a collection of Patch operations used to update an image.
+type UpdateOpts []Patch
+
+// ToImageUpdateMap assembles a request body based on the contents of
+// UpdateOpts.
+func (opts UpdateOpts) ToImageUpdateMap() ([]interface{}, error) {
+	m := make([]interface{}, len(opts))
+	for i, patch := range opts {
+		patchJSON := patch.ToImagePatchMap()
+		m[i] = patchJSON
+	}
+	return m, nil
+}
+
+// Patch represents a single update to an existing image. Multiple updates
+// to an image can be submitted at the same time.
+type Patch interface {
+	ToImagePatchMap() map[string]interface{}
+}
+
+// UpdateVisibility represents an updated visibility property request.
+type UpdateVisibility struct {
+	Visibility ImageVisibility
+}
+
+// ToImagePatchMap assembles a request body based on UpdateVisibility.
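+//
+// As a sketch of the resulting JSON patch entry (assuming a visibility of
+// "public"), this produces roughly:
+//
+//	{"op": "replace", "path": "/visibility", "value": "public"}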
+func (r UpdateVisibility) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/visibility",
+		"value": r.Visibility,
+	}
+}
+
+// ReplaceImageName represents an updated image_name property request.
+type ReplaceImageName struct {
+	NewName string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageName.
+func (r ReplaceImageName) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/name",
+		"value": r.NewName,
+	}
+}
+
+// ReplaceImageChecksum represents an updated checksum property request.
+type ReplaceImageChecksum struct {
+	Checksum string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageChecksum.
+func (r ReplaceImageChecksum) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/checksum",
+		"value": r.Checksum,
+	}
+}
+
+// ReplaceImageTags represents an updated tags property request.
+type ReplaceImageTags struct {
+	NewTags []string
+}
+
+// ToImagePatchMap assembles a request body based on ReplaceImageTags.
+func (r ReplaceImageTags) ToImagePatchMap() map[string]interface{} {
+	return map[string]interface{}{
+		"op":    "replace",
+		"path":  "/tags",
+		"value": r.NewTags,
+	}
+}
+
+// UpdateOp represents a valid update operation.
+type UpdateOp string
+
+const (
+	AddOp     UpdateOp = "add"
+	ReplaceOp UpdateOp = "replace"
+	RemoveOp  UpdateOp = "remove"
+)
+
+// UpdateImageProperty represents an update property request.
+type UpdateImageProperty struct {
+	Op    UpdateOp
+	Name  string
+	Value string
+}
+
+// ToImagePatchMap assembles a request body based on UpdateImageProperty.
+func (r UpdateImageProperty) ToImagePatchMap() map[string]interface{} {
+	updateMap := map[string]interface{}{
+		"op":   r.Op,
+		"path": fmt.Sprintf("/%s", r.Name),
+	}
+
+	if r.Value != "" {
+		updateMap["value"] = r.Value
+	}
+
+	return updateMap
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
new file mode 100644
index 000000000000..676181e1f4e5
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/results.go
@@ -0,0 +1,202 @@
+package images
+
+import (
+	"encoding/json"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/internal"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Image represents an image found in the OpenStack Image service.
+type Image struct {
+	// ID is the image UUID.
+	ID string `json:"id"`
+
+	// Name is the human-readable display name for the image.
+	Name string `json:"name"`
+
+	// Status is the image status. It can be "queued" or "active".
+	// See imageservice/v2/images/type.go
+	Status ImageStatus `json:"status"`
+
+	// Tags is a list of image tags. Tags are arbitrarily defined strings
+	// attached to an image.
+	Tags []string `json:"tags"`
+
+	// ContainerFormat is the format of the container.
+	// Valid values are ami, ari, aki, bare, and ovf.
+	ContainerFormat string `json:"container_format"`
+
+	// DiskFormat is the format of the disk.
+	// If set, valid values are ami, ari, aki, vhd, vmdk, raw, qcow2, vdi,
+	// and iso.
+	DiskFormat string `json:"disk_format"`
+
+	// MinDiskGigabytes is the amount of disk space in GB that is required to
+	// boot the image.
+	MinDiskGigabytes int `json:"min_disk"`
+
+	// MinRAMMegabytes [optional] is the amount of RAM in MB that is required to
+	// boot the image.
+	MinRAMMegabytes int `json:"min_ram"`
+
+	// Owner is the tenant ID the image belongs to.
+	Owner string `json:"owner"`
+
+	// Protected is whether the image is protected from deletion.
+	Protected bool `json:"protected"`
+
+	// Visibility defines who can see/use the image.
+	Visibility ImageVisibility `json:"visibility"`
+
+	// Checksum is the checksum of the data that's associated with the image.
+	Checksum string `json:"checksum"`
+
+	// SizeBytes is the size of the data that's associated with the image.
+	SizeBytes int64 `json:"-"`
+
+	// Metadata is a set of metadata associated with the image.
+	// Image metadata allows image properties and tags to be defined in a
+	// meaningful way.
+	// See http://docs.openstack.org/developer/glance/metadefs-concepts.html.
+	Metadata map[string]string `json:"metadata"`
+
+	// Properties is a set of key-value pairs, if any, that are associated with
+	// the image.
+	Properties map[string]interface{}
+
+	// CreatedAt is the date when the image has been created.
+	CreatedAt time.Time `json:"created_at"`
+
+	// UpdatedAt is the date when the last change has been made to the image or
+	// its properties.
+	UpdatedAt time.Time `json:"updated_at"`
+
+	// File is the trailing path after the glance endpoint that represents the
+	// location of the image or the path to retrieve it.
+	File string `json:"file"`
+
+	// Schema is the path to the JSON-schema that represents the image or image
+	// entity.
+	Schema string `json:"schema"`
+
+	// VirtualSize is the virtual size of the image.
+	VirtualSize int64 `json:"virtual_size"`
+}
+
+func (r *Image) UnmarshalJSON(b []byte) error {
+	type tmp Image
+	var s struct {
+		tmp
+		SizeBytes interface{} `json:"size"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Image(s.tmp)
+
+	switch t := s.SizeBytes.(type) {
+	case nil:
+		r.SizeBytes = 0
+	case float32:
+		r.SizeBytes = int64(t)
+	case float64:
+		r.SizeBytes = int64(t)
+	default:
+		return fmt.Errorf("Unknown type for SizeBytes: %v (value: %v)", reflect.TypeOf(t), t)
+	}
+
+	// Bundle all other fields into Properties
+	var result interface{}
+	err = json.Unmarshal(b, &result)
+	if err != nil {
+		return err
+	}
+	if resultMap, ok := result.(map[string]interface{}); ok {
+		delete(resultMap, "self")
+		delete(resultMap, "size")
+		r.Properties = internal.RemainingKeys(Image{}, resultMap)
+	}
+
+	return err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any commonResult as an Image.
+func (r commonResult) Extract() (*Image, error) {
+	var s *Image
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// CreateResult represents the result of a Create operation. Call its Extract
+// method to interpret it as an Image.
+type CreateResult struct {
+	commonResult
+}
+
+// UpdateResult represents the result of an Update operation. Call its Extract
+// method to interpret it as an Image.
+type UpdateResult struct {
+	commonResult
+}
+
+// GetResult represents the result of a Get operation. Call its Extract
+// method to interpret it as an Image.
+type GetResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a Delete operation. Call its
+// ExtractErr method to determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// ImagePage represents the results of a List request.
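+//
+// The serviceURL field is retained so that NextPageURL (below) can build the
+// next-page URL from the relative "next" link returned by the API.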
+type ImagePage struct { + serviceURL string + pagination.LinkedPageBase +} + +// IsEmpty returns true if an ImagePage contains no Images results. +func (r ImagePage) IsEmpty() (bool, error) { + images, err := ExtractImages(r) + return len(images) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to +// the next page of results. +func (r ImagePage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + if s.Next == "" { + return "", nil + } + + return nextPageURL(r.serviceURL, s.Next) +} + +// ExtractImages interprets the results of a single page from a List() call, +// producing a slice of Image entities. +func ExtractImages(r pagination.Page) ([]Image, error) { + var s struct { + Images []Image `json:"images"` + } + err := (r.(ImagePage)).ExtractInto(&s) + return s.Images, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/doc.go new file mode 100644 index 000000000000..db10451530e2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/doc.go @@ -0,0 +1,2 @@ +// images unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/fixtures.go new file mode 100644 index 000000000000..413ae6bc4ee3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/fixtures.go @@ -0,0 +1,447 @@ +package testing + +import ( + "fmt" + "net/http" + "strconv" + "strings" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +type imageEntry struct { + ID string + JSON string +} + +// HandleImageListSuccessfully test setup +func HandleImageListSuccessfully(t *testing.T) { + + images := make([]imageEntry, 3) + + images[0] = imageEntry{"cirros-0.3.4-x86_64-uec", + `{ + "status": "active", + "name": "cirros-0.3.4-x86_64-uec", + "tags": [], + "kernel_id": "e1b6edd4-bd9b-40ac-b010-8a6c16de4ba4", + "container_format": "ami", + "created_at": "2015-07-15T11:43:35Z", + "ramdisk_id": "8c64f48a-45a3-4eaa-adff-a8106b6c005b", + "disk_format": "ami", + "updated_at": "2015-07-15T11:43:35Z", + "visibility": "public", + "self": "/v2/images/07aa21a9-fa1a-430e-9a33-185be5982431", + "min_disk": 0, + "protected": false, + "id": "07aa21a9-fa1a-430e-9a33-185be5982431", + "size": 25165824, + "file": "/v2/images/07aa21a9-fa1a-430e-9a33-185be5982431/file", + "checksum": "eb9139e4942121f22bbc2afc0400b2a4", + "owner": "cba624273b8344e59dd1fd18685183b0", + "virtual_size": null, + "min_ram": 0, + "schema": "/v2/schemas/image", + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + }`} + images[1] = imageEntry{"cirros-0.3.4-x86_64-uec-ramdisk", + `{ + "status": "active", + "name": "cirros-0.3.4-x86_64-uec-ramdisk", + "tags": [], + "container_format": "ari", + "created_at": "2015-07-15T11:43:32Z", + "size": 3740163, + "disk_format": "ari", + "updated_at": "2015-07-15T11:43:32Z", + "visibility": "public", + "self": "/v2/images/8c64f48a-45a3-4eaa-adff-a8106b6c005b", + "min_disk": 0, + "protected": false, + "id": "8c64f48a-45a3-4eaa-adff-a8106b6c005b", + "file": 
"/v2/images/8c64f48a-45a3-4eaa-adff-a8106b6c005b/file", + "checksum": "be575a2b939972276ef675752936977f", + "owner": "cba624273b8344e59dd1fd18685183b0", + "virtual_size": null, + "min_ram": 0, + "schema": "/v2/schemas/image", + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + }`} + images[2] = imageEntry{"cirros-0.3.4-x86_64-uec-kernel", + `{ + "status": "active", + "name": "cirros-0.3.4-x86_64-uec-kernel", + "tags": [], + "container_format": "aki", + "created_at": "2015-07-15T11:43:29Z", + "size": 4979632, + "disk_format": "aki", + "updated_at": "2015-07-15T11:43:30Z", + "visibility": "public", + "self": "/v2/images/e1b6edd4-bd9b-40ac-b010-8a6c16de4ba4", + "min_disk": 0, + "protected": false, + "id": "e1b6edd4-bd9b-40ac-b010-8a6c16de4ba4", + "file": "/v2/images/e1b6edd4-bd9b-40ac-b010-8a6c16de4ba4/file", + "checksum": "8a40c862b5735975d82605c1dd395796", + "owner": "cba624273b8344e59dd1fd18685183b0", + "virtual_size": null, + "min_ram": 0, + "schema": "/v2/schemas/image", + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + }`} + + th.Mux.HandleFunc("/images", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + + w.WriteHeader(http.StatusOK) + + limit := 10 + var err error + if r.FormValue("limit") != "" { + limit, err = strconv.Atoi(r.FormValue("limit")) + if err != nil { + t.Errorf("Error value for 'limit' parameter %v (error: %v)", r.FormValue("limit"), err) + } + + } + + marker := "" + newMarker := "" + + if r.Form["marker"] != nil { + marker = r.Form["marker"][0] + } + + t.Logf("limit = %v marker = %v", limit, marker) + + selected := 0 + addNext := false + var imageJSON []string + + fmt.Fprintf(w, `{"images": [`) + + for _, i := range images { + if marker == "" || addNext { + t.Logf("Adding image %v to page", i.ID) + imageJSON = append(imageJSON, i.JSON) + newMarker = i.ID + selected++ + } else { + if strings.Contains(i.JSON, marker) { + addNext = true + } + } + + if selected == limit { + break + } + } + t.Logf("Writing out %v image(s)", len(imageJSON)) + fmt.Fprintf(w, strings.Join(imageJSON, ",")) + + fmt.Fprintf(w, `], + "next": "/images?marker=%s&limit=%v", + "schema": "/schemas/images", + "first": "/images?limit=%v"}`, newMarker, limit, limit) + + }) +} + +// HandleImageCreationSuccessfully test setup +func HandleImageCreationSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + th.TestJSONRequest(t, r, `{ + "id": "e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + "name": "Ubuntu 12.10", + "architecture": "x86_64", + "tags": [ + "ubuntu", + "quantal" + ] + }`) + + w.WriteHeader(http.StatusCreated) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "status": "queued", + "name": "Ubuntu 12.10", + "protected": false, + "tags": ["ubuntu","quantal"], + "container_format": "bare", + "created_at": "2014-11-11T20:47:55Z", + "disk_format": "qcow2", + "updated_at": "2014-11-11T20:47:55Z", + "visibility": "private", + "self": "/v2/images/e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + "min_disk": 0, + "protected": false, + "id": "e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + "file": "/v2/images/e7db3b45-8db7-47ad-8109-3fb55c2c24fd/file", + "owner": "b4eedccc6fb74fa8a7ad6b08382b852b", + "min_ram": 0, + "schema": 
"/v2/schemas/image", + "size": 0, + "checksum": "", + "virtual_size": 0, + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + }`) + }) +} + +// HandleImageCreationSuccessfullyNulls test setup +// JSON null values could be also returned according to behaviour https://bugs.launchpad.net/glance/+bug/1481512 +func HandleImageCreationSuccessfullyNulls(t *testing.T) { + th.Mux.HandleFunc("/images", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + th.TestJSONRequest(t, r, `{ + "id": "e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + "architecture": "x86_64", + "name": "Ubuntu 12.10", + "tags": [ + "ubuntu", + "quantal" + ] + }`) + + w.WriteHeader(http.StatusCreated) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "architecture": "x86_64", + "status": "queued", + "name": "Ubuntu 12.10", + "protected": false, + "tags": ["ubuntu","quantal"], + "container_format": "bare", + "created_at": "2014-11-11T20:47:55Z", + "disk_format": "qcow2", + "updated_at": "2014-11-11T20:47:55Z", + "visibility": "private", + "self": "/v2/images/e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + "min_disk": 0, + "protected": false, + "id": "e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + "file": "/v2/images/e7db3b45-8db7-47ad-8109-3fb55c2c24fd/file", + "owner": "b4eedccc6fb74fa8a7ad6b08382b852b", + "min_ram": 0, + "schema": "/v2/schemas/image", + "size": null, + "checksum": null, + "virtual_size": null + }`) + }) +} + +// HandleImageGetSuccessfully test setup +func HandleImageGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "status": "active", + "name": "cirros-0.3.2-x86_64-disk", + "tags": [], + "container_format": "bare", + "created_at": "2014-05-05T17:15:10Z", + "disk_format": "qcow2", + "updated_at": "2014-05-05T17:15:11Z", + "visibility": "public", + "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "min_disk": 0, + "protected": false, + "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", + "checksum": "64d7c1cd2b6f60c92c14662941cb7913", + "owner": "5ef70662f8b34079a6eddb8da9d75fe8", + "size": 13167616, + "min_ram": 0, + "schema": "/v2/schemas/image", + "virtual_size": null, + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + }`) + }) +} + +// HandleImageDeleteSuccessfully test setup +func HandleImageDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleImageUpdateSuccessfully setup +func HandleImageUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + th.TestJSONRequest(t, r, `[ + { + "op": "replace", + "path": "/name", + "value": "Fedora 17" + }, + { + "op": "replace", + "path": "/tags", + "value": [ + "fedora", + "beefy" + ] + } + ]`) + + 
th.AssertEquals(t, "application/openstack-images-v2.1-json-patch", r.Header.Get("Content-Type")) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "name": "Fedora 17", + "status": "active", + "visibility": "public", + "size": 2254249, + "checksum": "2cec138d7dae2aa59038ef8c9aec2390", + "tags": [ + "fedora", + "beefy" + ], + "created_at": "2012-08-10T19:23:50Z", + "updated_at": "2012-08-12T11:11:33Z", + "self": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/file", + "schema": "/v2/schemas/image", + "owner": "", + "min_ram": 0, + "min_disk": 0, + "disk_format": "", + "virtual_size": 0, + "container_format": "", + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + }`) + }) +} + +// HandleImageListByTagsSuccessfully tests a list operation with tags. +func HandleImageListByTagsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `{ + "images": [ + { + "status": "active", + "name": "cirros-0.3.2-x86_64-disk", + "tags": ["foo", "bar"], + "container_format": "bare", + "created_at": "2014-05-05T17:15:10Z", + "disk_format": "qcow2", + "updated_at": "2014-05-05T17:15:11Z", + "visibility": "public", + "self": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "min_disk": 0, + "protected": false, + "id": "1bea47ed-f6a9-463b-b423-14b9cca9ad27", + "file": "/v2/images/1bea47ed-f6a9-463b-b423-14b9cca9ad27/file", + "checksum": "64d7c1cd2b6f60c92c14662941cb7913", + "owner": "5ef70662f8b34079a6eddb8da9d75fe8", + "size": 13167616, + "min_ram": 0, + "schema": "/v2/schemas/image", + "virtual_size": null, + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi" + } + ] + }`) + }) +} + +// HandleImageUpdatePropertiesSuccessfully setup +func HandleImageUpdatePropertiesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + th.TestJSONRequest(t, r, `[ + { + "op": "add", + "path": "/hw_disk_bus", + "value": "scsi" + }, + { + "op": "add", + "path": "/hw_disk_bus_model", + "value": "virtio-scsi" + }, + { + "op": "add", + "path": "/hw_scsi_model", + "value": "virtio-scsi" + } + ]`) + + th.AssertEquals(t, "application/openstack-images-v2.1-json-patch", r.Header.Get("Content-Type")) + + w.WriteHeader(http.StatusOK) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "name": "Fedora 17", + "status": "active", + "visibility": "public", + "size": 2254249, + "checksum": "2cec138d7dae2aa59038ef8c9aec2390", + "tags": [ + "fedora", + "beefy" + ], + "created_at": "2012-08-10T19:23:50Z", + "updated_at": "2012-08-12T11:11:33Z", + "self": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "file": "/v2/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/file", + "schema": "/v2/schemas/image", + "owner": "", + "min_ram": 0, + "min_disk": 0, + "disk_format": "", + "virtual_size": 0, + "container_format": "", + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": 
"virtio-scsi" + }`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/requests_test.go new file mode 100644 index 000000000000..ed9a344f49b3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/testing/requests_test.go @@ -0,0 +1,458 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/images" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageListSuccessfully(t) + + t.Logf("Test setup %+v\n", th.Server) + + t.Logf("Id\tName\tOwner\tChecksum\tSizeBytes") + + pager := images.List(fakeclient.ServiceClient(), images.ListOpts{Limit: 1}) + t.Logf("Pager state %v", pager) + count, pages := 0, 0 + err := pager.EachPage(func(page pagination.Page) (bool, error) { + pages++ + t.Logf("Page %v", page) + images, err := images.ExtractImages(page) + if err != nil { + return false, err + } + + for _, i := range images { + t.Logf("%s\t%s\t%s\t%s\t%v\t\n", i.ID, i.Name, i.Owner, i.Checksum, i.SizeBytes) + count++ + } + + return true, nil + }) + th.AssertNoErr(t, err) + + t.Logf("--------\n%d images listed on %d pages.\n", count, pages) + th.AssertEquals(t, 3, pages) + th.AssertEquals(t, 3, count) +} + +func TestAllPagesImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageListSuccessfully(t) + + pages, err := images.List(fakeclient.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + images, err := images.ExtractImages(pages) + th.AssertNoErr(t, err) + th.AssertEquals(t, 3, len(images)) +} + +func TestCreateImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageCreationSuccessfully(t) + + id := "e7db3b45-8db7-47ad-8109-3fb55c2c24fd" + name := "Ubuntu 12.10" + + actualImage, err := images.Create(fakeclient.ServiceClient(), images.CreateOpts{ + ID: id, + Name: name, + Properties: map[string]string{ + "architecture": "x86_64", + }, + Tags: []string{"ubuntu", "quantal"}, + }).Extract() + + th.AssertNoErr(t, err) + + containerFormat := "bare" + diskFormat := "qcow2" + owner := "b4eedccc6fb74fa8a7ad6b08382b852b" + minDiskGigabytes := 0 + minRAMMegabytes := 0 + file := actualImage.File + createdDate := actualImage.CreatedAt + lastUpdate := actualImage.UpdatedAt + schema := "/v2/schemas/image" + + expectedImage := images.Image{ + ID: "e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + Name: "Ubuntu 12.10", + Tags: []string{"ubuntu", "quantal"}, + + Status: images.ImageStatusQueued, + + ContainerFormat: containerFormat, + DiskFormat: diskFormat, + + MinDiskGigabytes: minDiskGigabytes, + MinRAMMegabytes: minRAMMegabytes, + + Owner: owner, + + Visibility: images.ImageVisibilityPrivate, + File: file, + CreatedAt: createdDate, + UpdatedAt: lastUpdate, + Schema: schema, + VirtualSize: 0, + Properties: map[string]interface{}{ + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi", + }, + } + + th.AssertDeepEquals(t, &expectedImage, actualImage) +} + +func TestCreateImageNulls(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageCreationSuccessfullyNulls(t) + + id := "e7db3b45-8db7-47ad-8109-3fb55c2c24fd" + name := "Ubuntu 12.10" + + 
actualImage, err := images.Create(fakeclient.ServiceClient(), images.CreateOpts{ + ID: id, + Name: name, + Tags: []string{"ubuntu", "quantal"}, + Properties: map[string]string{ + "architecture": "x86_64", + }, + }).Extract() + + th.AssertNoErr(t, err) + + containerFormat := "bare" + diskFormat := "qcow2" + owner := "b4eedccc6fb74fa8a7ad6b08382b852b" + minDiskGigabytes := 0 + minRAMMegabytes := 0 + file := actualImage.File + createdDate := actualImage.CreatedAt + lastUpdate := actualImage.UpdatedAt + schema := "/v2/schemas/image" + properties := map[string]interface{}{ + "architecture": "x86_64", + } + sizeBytes := int64(0) + + expectedImage := images.Image{ + ID: "e7db3b45-8db7-47ad-8109-3fb55c2c24fd", + Name: "Ubuntu 12.10", + Tags: []string{"ubuntu", "quantal"}, + + Status: images.ImageStatusQueued, + + ContainerFormat: containerFormat, + DiskFormat: diskFormat, + + MinDiskGigabytes: minDiskGigabytes, + MinRAMMegabytes: minRAMMegabytes, + + Owner: owner, + + Visibility: images.ImageVisibilityPrivate, + File: file, + CreatedAt: createdDate, + UpdatedAt: lastUpdate, + Schema: schema, + Properties: properties, + SizeBytes: sizeBytes, + } + + th.AssertDeepEquals(t, &expectedImage, actualImage) +} + +func TestGetImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageGetSuccessfully(t) + + actualImage, err := images.Get(fakeclient.ServiceClient(), "1bea47ed-f6a9-463b-b423-14b9cca9ad27").Extract() + + th.AssertNoErr(t, err) + + checksum := "64d7c1cd2b6f60c92c14662941cb7913" + sizeBytes := int64(13167616) + containerFormat := "bare" + diskFormat := "qcow2" + minDiskGigabytes := 0 + minRAMMegabytes := 0 + owner := "5ef70662f8b34079a6eddb8da9d75fe8" + file := actualImage.File + createdDate := actualImage.CreatedAt + lastUpdate := actualImage.UpdatedAt + schema := "/v2/schemas/image" + + expectedImage := images.Image{ + ID: "1bea47ed-f6a9-463b-b423-14b9cca9ad27", + Name: "cirros-0.3.2-x86_64-disk", + Tags: []string{}, + + Status: images.ImageStatusActive, + + ContainerFormat: containerFormat, + DiskFormat: diskFormat, + + MinDiskGigabytes: minDiskGigabytes, + MinRAMMegabytes: minRAMMegabytes, + + Owner: owner, + + Protected: false, + Visibility: images.ImageVisibilityPublic, + + Checksum: checksum, + SizeBytes: sizeBytes, + File: file, + CreatedAt: createdDate, + UpdatedAt: lastUpdate, + Schema: schema, + VirtualSize: 0, + Properties: map[string]interface{}{ + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi", + }, + } + + th.AssertDeepEquals(t, &expectedImage, actualImage) +} + +func TestDeleteImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageDeleteSuccessfully(t) + + result := images.Delete(fakeclient.ServiceClient(), "1bea47ed-f6a9-463b-b423-14b9cca9ad27") + th.AssertNoErr(t, result.Err) +} + +func TestUpdateImage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageUpdateSuccessfully(t) + + actualImage, err := images.Update(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", images.UpdateOpts{ + images.ReplaceImageName{NewName: "Fedora 17"}, + images.ReplaceImageTags{NewTags: []string{"fedora", "beefy"}}, + }).Extract() + + th.AssertNoErr(t, err) + + sizebytes := int64(2254249) + checksum := "2cec138d7dae2aa59038ef8c9aec2390" + file := actualImage.File + createdDate := actualImage.CreatedAt + lastUpdate := actualImage.UpdatedAt + schema := "/v2/schemas/image" + + expectedImage := images.Image{ + ID: "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + Name: "Fedora 17", + 
Status: images.ImageStatusActive, + Visibility: images.ImageVisibilityPublic, + + SizeBytes: sizebytes, + Checksum: checksum, + + Tags: []string{ + "fedora", + "beefy", + }, + + Owner: "", + MinRAMMegabytes: 0, + MinDiskGigabytes: 0, + + DiskFormat: "", + ContainerFormat: "", + File: file, + CreatedAt: createdDate, + UpdatedAt: lastUpdate, + Schema: schema, + VirtualSize: 0, + Properties: map[string]interface{}{ + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi", + }, + } + + th.AssertDeepEquals(t, &expectedImage, actualImage) +} + +func TestImageDateQuery(t *testing.T) { + date := time.Date(2014, 1, 1, 1, 1, 1, 0, time.UTC) + + listOpts := images.ListOpts{ + CreatedAtQuery: &images.ImageDateQuery{ + Date: date, + Filter: images.FilterGTE, + }, + UpdatedAtQuery: &images.ImageDateQuery{ + Date: date, + }, + } + + expectedQueryString := "?created_at=gte%3A2014-01-01T01%3A01%3A01Z&updated_at=2014-01-01T01%3A01%3A01Z" + actualQueryString, err := listOpts.ToImageListQuery() + th.AssertNoErr(t, err) + th.AssertEquals(t, expectedQueryString, actualQueryString) +} + +func TestImageListByTags(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageListByTagsSuccessfully(t) + + listOpts := images.ListOpts{ + Tags: []string{"foo", "bar"}, + } + + expectedQueryString := "?tag=foo&tag=bar" + actualQueryString, err := listOpts.ToImageListQuery() + th.AssertNoErr(t, err) + th.AssertEquals(t, expectedQueryString, actualQueryString) + + pages, err := images.List(fakeclient.ServiceClient(), listOpts).AllPages() + th.AssertNoErr(t, err) + allImages, err := images.ExtractImages(pages) + th.AssertNoErr(t, err) + + checksum := "64d7c1cd2b6f60c92c14662941cb7913" + sizeBytes := int64(13167616) + containerFormat := "bare" + diskFormat := "qcow2" + minDiskGigabytes := 0 + minRAMMegabytes := 0 + owner := "5ef70662f8b34079a6eddb8da9d75fe8" + file := allImages[0].File + createdDate := allImages[0].CreatedAt + lastUpdate := allImages[0].UpdatedAt + schema := "/v2/schemas/image" + tags := []string{"foo", "bar"} + + expectedImage := images.Image{ + ID: "1bea47ed-f6a9-463b-b423-14b9cca9ad27", + Name: "cirros-0.3.2-x86_64-disk", + Tags: tags, + + Status: images.ImageStatusActive, + + ContainerFormat: containerFormat, + DiskFormat: diskFormat, + + MinDiskGigabytes: minDiskGigabytes, + MinRAMMegabytes: minRAMMegabytes, + + Owner: owner, + + Protected: false, + Visibility: images.ImageVisibilityPublic, + + Checksum: checksum, + SizeBytes: sizeBytes, + File: file, + CreatedAt: createdDate, + UpdatedAt: lastUpdate, + Schema: schema, + VirtualSize: 0, + Properties: map[string]interface{}{ + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi", + }, + } + + th.AssertDeepEquals(t, expectedImage, allImages[0]) +} + +func TestUpdateImageProperties(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageUpdatePropertiesSuccessfully(t) + + actualImage, err := images.Update(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", images.UpdateOpts{ + images.UpdateImageProperty{ + Op: images.AddOp, + Name: "hw_disk_bus", + Value: "scsi", + }, + images.UpdateImageProperty{ + Op: images.AddOp, + Name: "hw_disk_bus_model", + Value: "virtio-scsi", + }, + images.UpdateImageProperty{ + Op: images.AddOp, + Name: "hw_scsi_model", + Value: "virtio-scsi", + }, + }).Extract() + + th.AssertNoErr(t, err) + + sizebytes := int64(2254249) + checksum := "2cec138d7dae2aa59038ef8c9aec2390" + file := actualImage.File + 
createdDate := actualImage.CreatedAt + lastUpdate := actualImage.UpdatedAt + schema := "/v2/schemas/image" + + expectedImage := images.Image{ + ID: "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + Name: "Fedora 17", + Status: images.ImageStatusActive, + Visibility: images.ImageVisibilityPublic, + + SizeBytes: sizebytes, + Checksum: checksum, + + Tags: []string{ + "fedora", + "beefy", + }, + + Owner: "", + MinRAMMegabytes: 0, + MinDiskGigabytes: 0, + + DiskFormat: "", + ContainerFormat: "", + File: file, + CreatedAt: createdDate, + UpdatedAt: lastUpdate, + Schema: schema, + VirtualSize: 0, + Properties: map[string]interface{}{ + "hw_disk_bus": "scsi", + "hw_disk_bus_model": "virtio-scsi", + "hw_scsi_model": "virtio-scsi", + }, + } + + th.AssertDeepEquals(t, &expectedImage, actualImage) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go new file mode 100644 index 000000000000..d2f9cbd3bfbd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/types.go @@ -0,0 +1,104 @@ +package images + +import ( + "time" +) + +// ImageStatus image statuses +// http://docs.openstack.org/developer/glance/statuses.html +type ImageStatus string + +const ( + // ImageStatusQueued is a status for an image which identifier has + // been reserved for an image in the image registry. + ImageStatusQueued ImageStatus = "queued" + + // ImageStatusSaving denotes that an image’s raw data is currently being + // uploaded to Glance + ImageStatusSaving ImageStatus = "saving" + + // ImageStatusActive denotes an image that is fully available in Glance. + ImageStatusActive ImageStatus = "active" + + // ImageStatusKilled denotes that an error occurred during the uploading + // of an image’s data, and that the image is not readable. + ImageStatusKilled ImageStatus = "killed" + + // ImageStatusDeleted is used for an image that is no longer available to use. + // The image information is retained in the image registry. + ImageStatusDeleted ImageStatus = "deleted" + + // ImageStatusPendingDelete is similar to Delete, but the image is not yet + // deleted. + ImageStatusPendingDelete ImageStatus = "pending_delete" + + // ImageStatusDeactivated denotes that access to image data is not allowed to + // any non-admin user. + ImageStatusDeactivated ImageStatus = "deactivated" +) + +// ImageVisibility denotes an image that is fully available in Glance. +// This occurs when the image data is uploaded, or the image size is explicitly +// set to zero on creation. +// According to design +// https://wiki.openstack.org/wiki/Glance-v2-community-image-visibility-design +type ImageVisibility string + +const ( + // ImageVisibilityPublic all users + ImageVisibilityPublic ImageVisibility = "public" + + // ImageVisibilityPrivate users with tenantId == tenantId(owner) + ImageVisibilityPrivate ImageVisibility = "private" + + // ImageVisibilityShared images are visible to: + // - users with tenantId == tenantId(owner) + // - users with tenantId in the member-list of the image + // - users with tenantId in the member-list with member_status == 'accepted' + ImageVisibilityShared ImageVisibility = "shared" + + // ImageVisibilityCommunity images: + // - all users can see and boot it + // - users with tenantId in the member-list of the image with + // member_status == 'accepted' have this image in their default image-list. 
+	ImageVisibilityCommunity ImageVisibility = "community"
+)
+
+// ImageMemberStatus is a status for adding a new member (tenant) to an image
+// member list.
+type ImageMemberStatus string
+
+const (
+	// ImageMemberStatusAccepted is the status for an accepted image member.
+	ImageMemberStatusAccepted ImageMemberStatus = "accepted"
+
+	// ImageMemberStatusPending shows that the member addition is pending.
+	ImageMemberStatusPending ImageMemberStatus = "pending"
+
+	// ImageMemberStatusRejected is the status for a rejected image member.
+	ImageMemberStatusRejected ImageMemberStatus = "rejected"
+
+	// ImageMemberStatusAll matches image members in any of the above states.
+	ImageMemberStatusAll ImageMemberStatus = "all"
+)
+
+// ImageDateFilter represents a valid filter to use for filtering
+// images by their date during a List.
+type ImageDateFilter string
+
+const (
+	FilterGT  ImageDateFilter = "gt"
+	FilterGTE ImageDateFilter = "gte"
+	FilterLT  ImageDateFilter = "lt"
+	FilterLTE ImageDateFilter = "lte"
+	FilterNEQ ImageDateFilter = "neq"
+	FilterEQ  ImageDateFilter = "eq"
+)
+
+// ImageDateQuery represents a date field to be used for listing images.
+// If no filter is specified, the query will act as though FilterEQ was
+// set.
+type ImageDateQuery struct {
+	Date   time.Time
+	Filter ImageDateFilter
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go
new file mode 100644
index 000000000000..1780c3c6ca70
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/images/urls.go
@@ -0,0 +1,65 @@
+package images
+
+import (
+	"net/url"
+	"strings"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/utils"
+)
+
+// `listURL` is a pure function. `listURL(c)` is a URL for which a GET
+// request will respond with a list of images in the service `c`.
+func listURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("images")
+}
+
+func createURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL("images")
+}
+
+// `imageURL(c,i)` is the URL for the image identified by ID `i` in
+// the service `c`.
+func imageURL(c *gophercloud.ServiceClient, imageID string) string {
+	return c.ServiceURL("images", imageID)
+}
+
+// `getURL(c,i)` is a URL for which a GET request will respond with
+// information about the image identified by ID `i` in the service
+// `c`.
+func getURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +func updateURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +func deleteURL(c *gophercloud.ServiceClient, imageID string) string { + return imageURL(c, imageID) +} + +// builds next page full url based on current url +func nextPageURL(serviceURL, requestedNext string) (string, error) { + base, err := utils.BaseEndpoint(serviceURL) + if err != nil { + return "", err + } + + requestedNextURL, err := url.Parse(requestedNext) + if err != nil { + return "", err + } + + base = gophercloud.NormalizeURL(base) + nextPath := base + strings.TrimPrefix(requestedNextURL.Path, "/") + + nextURL, err := url.Parse(nextPath) + if err != nil { + return "", err + } + + nextURL.RawQuery = requestedNextURL.RawQuery + + return nextURL.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go new file mode 100644 index 000000000000..1a7132045f87 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/doc.go @@ -0,0 +1,58 @@ +/* +Package members enables management and retrieval of image members. + +Members are projects other than the image owner who have access to the image. + +Example to List Members of an Image + + imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f" + + allPages, err := members.List(imageID).AllPages() + if err != nil { + panic(err) + } + + allMembers, err := members.ExtractMembers(allPages) + if err != nil { + panic(err) + } + + for _, member := range allMembers { + fmt.Printf("%+v\n", member) + } + +Example to Add a Member to an Image + + imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f" + projectID := "fc404778935a4cebaddcb4788fb3ff2c" + + member, err := members.Create(imageClient, imageID, projectID).Extract() + if err != nil { + panic(err) + } + +Example to Update the Status of a Member + + imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f" + projectID := "fc404778935a4cebaddcb4788fb3ff2c" + + updateOpts := members.UpdateOpts{ + Status: "accepted", + } + + member, err := members.Update(imageClient, imageID, projectID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Member from an Image + + imageID := "2b6cacd4-cfd6-4b95-8302-4c04ccf0be3f" + projectID := "fc404778935a4cebaddcb4788fb3ff2c" + + err := members.Delete(imageClient, imageID, projectID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package members diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go new file mode 100644 index 000000000000..b80e54e83c8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/requests.go @@ -0,0 +1,81 @@ +package members + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +/* + Create member for specific image + + Preconditions + + * The specified images must exist. + * You can only add a new member to an image which 'visibility' attribute is + private. + * You must be the owner of the specified image. + + Synchronous Postconditions + + With correct permissions, you can see the member status of the image as + pending through API calls. 
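+
+ Example (an illustrative sketch; imageClient, imageID and projectID are
+ placeholders, and the final print statement is an editorial addition rather
+ than part of the upstream documentation):
+
+	member, err := members.Create(imageClient, imageID, projectID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("member %s is %s\n", member.MemberID, member.Status)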
+
+ More details here:
+ http://developer.openstack.org/api-ref-image-v2.html#createImageMember-v2
+*/
+func Create(client *gophercloud.ServiceClient, id string, member string) (r CreateResult) {
+	b := map[string]interface{}{"member": member}
+	_, r.Err = client.Post(createMemberURL(client, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// List returns a list of members for the specified image ID.
+func List(client *gophercloud.ServiceClient, id string) pagination.Pager {
+	return pagination.NewPager(client, listMembersURL(client, id), func(r pagination.PageResult) pagination.Page {
+		return MemberPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// Get image member details.
+func Get(client *gophercloud.ServiceClient, imageID string, memberID string) (r DetailsResult) {
+	_, r.Err = client.Get(getMemberURL(client, imageID, memberID), &r.Body, &gophercloud.RequestOpts{OkCodes: []int{200}})
+	return
+}
+
+// Delete removes a membership from the given image. The caller must be the
+// image owner.
+func Delete(client *gophercloud.ServiceClient, imageID string, memberID string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteMemberURL(client, imageID, memberID), &gophercloud.RequestOpts{OkCodes: []int{204}})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional attributes to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToImageMemberUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents options to an Update request.
+type UpdateOpts struct {
+	Status string
+}
+
+// ToImageMemberUpdateMap formats an UpdateOpts structure into a request body.
+func (opts UpdateOpts) ToImageMemberUpdateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{
+		"status": opts.Status,
+	}, nil
+}
+
+// Update updates the status of the given image member.
+func Update(client *gophercloud.ServiceClient, imageID string, memberID string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToImageMemberUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(updateMemberURL(client, imageID, memberID), b, &r.Body,
+		&gophercloud.RequestOpts{OkCodes: []int{200}})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go
new file mode 100644
index 000000000000..ab694bdc0fbd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/results.go
@@ -0,0 +1,74 @@
+package members
+
+import (
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Member represents a member of an Image.
+type Member struct {
+	CreatedAt time.Time `json:"created_at"`
+	ImageID   string    `json:"image_id"`
+	MemberID  string    `json:"member_id"`
+	Schema    string    `json:"schema"`
+	Status    string    `json:"status"`
+	UpdatedAt time.Time `json:"updated_at"`
+}
+
+// Extract interprets any commonResult as a Member.
+func (r commonResult) Extract() (*Member, error) {
+	var s *Member
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// MemberPage is a single page of Members results.
+type MemberPage struct {
+	pagination.SinglePageBase
+}
+
+// ExtractMembers returns a slice of Members contained in a single page
+// of results.
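+// A minimal sketch of its use from an EachPage callback (illustrative only:
+// client and imageID are placeholders and error handling is trimmed):
+//
+//	members.List(client, imageID).EachPage(func(page pagination.Page) (bool, error) {
+//		allMembers, err := members.ExtractMembers(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, m := range allMembers {
+//			fmt.Printf("%s: %s\n", m.MemberID, m.Status)
+//		}
+//		return true, nil
+//	})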
+func ExtractMembers(r pagination.Page) ([]Member, error) { + var s struct { + Members []Member `json:"members"` + } + err := r.(MemberPage).ExtractInto(&s) + return s.Members, err +} + +// IsEmpty determines whether or not a MemberPage contains any results. +func (r MemberPage) IsEmpty() (bool, error) { + members, err := ExtractMembers(r) + return len(members) == 0, err +} + +type commonResult struct { + gophercloud.Result +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret it as a Member. +type CreateResult struct { + commonResult +} + +// DetailsResult represents the result of a Get operation. Call its Extract +// method to interpret it as a Member. +type DetailsResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret it as a Member. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/doc.go new file mode 100644 index 000000000000..1afbc434f603 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/doc.go @@ -0,0 +1,2 @@ +// members unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/fixtures.go new file mode 100644 index 000000000000..c08fc5ebaa8c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/fixtures.go @@ -0,0 +1,138 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleCreateImageMemberSuccessfully setup +func HandleCreateImageMemberSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + th.TestJSONRequest(t, r, `{"member": "8989447062e04a818baf9e073fd04fa7"}`) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "created_at": "2013-09-20T19:22:19Z", + "image_id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "member_id": "8989447062e04a818baf9e073fd04fa7", + "schema": "/v2/schemas/member", + "status": "pending", + "updated_at": "2013-09-20T19:25:31Z" + }`) + + }) +} + +// HandleImageMemberList happy path setup +func HandleImageMemberList(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "members": [ + { + "created_at": "2013-10-07T17:58:03Z", + "image_id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "member_id": "123456789", + "schema": "/v2/schemas/member", + "status": "pending", + "updated_at": "2013-10-07T17:58:03Z" + }, + { + "created_at": "2013-10-07T17:58:55Z", + "image_id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + 
"member_id": "987654321", + "schema": "/v2/schemas/member", + "status": "accepted", + "updated_at": "2013-10-08T12:08:55Z" + } + ], + "schema": "/v2/schemas/members" + }`) + }) +} + +// HandleImageMemberEmptyList happy path setup +func HandleImageMemberEmptyList(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ + "members": [], + "schema": "/v2/schemas/members" + }`) + }) +} + +// HandleImageMemberDetails setup +func HandleImageMemberDetails(t *testing.T) { + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/members/8989447062e04a818baf9e073fd04fa7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{ + "status": "pending", + "created_at": "2013-11-26T07:21:21Z", + "updated_at": "2013-11-26T07:21:21Z", + "image_id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "member_id": "8989447062e04a818baf9e073fd04fa7", + "schema": "/v2/schemas/member" + }`) + }) +} + +// HandleImageMemberDeleteSuccessfully setup +func HandleImageMemberDeleteSuccessfully(t *testing.T) *CallsCounter { + var counter CallsCounter + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/members/8989447062e04a818baf9e073fd04fa7", func(w http.ResponseWriter, r *http.Request) { + counter.Counter = counter.Counter + 1 + + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) + return &counter +} + +// HandleImageMemberUpdate setup +func HandleImageMemberUpdate(t *testing.T) *CallsCounter { + var counter CallsCounter + th.Mux.HandleFunc("/images/da3b75d9-3f4a-40e7-8a2c-bfab23927dea/members/8989447062e04a818baf9e073fd04fa7", func(w http.ResponseWriter, r *http.Request) { + counter.Counter = counter.Counter + 1 + + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + th.TestJSONRequest(t, r, `{"status": "accepted"}`) + + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, `{ + "status": "accepted", + "created_at": "2013-11-26T07:21:21Z", + "updated_at": "2013-11-26T07:21:21Z", + "image_id": "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "member_id": "8989447062e04a818baf9e073fd04fa7", + "schema": "/v2/schemas/member" + }`) + }) + return &counter +} + +// CallsCounter for checking if request handler was called at all +type CallsCounter struct { + Counter int +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/requests_test.go new file mode 100644 index 000000000000..04624c993786 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/testing/requests_test.go @@ -0,0 +1,172 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/members" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +const createdAtString = "2013-09-20T19:22:19Z" +const updatedAtString = "2013-09-20T19:25:31Z" + +func TestCreateMemberSuccessfully(t *testing.T) { + th.SetupHTTP() + 
defer th.TeardownHTTP() + + HandleCreateImageMemberSuccessfully(t) + im, err := members.Create(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "8989447062e04a818baf9e073fd04fa7").Extract() + th.AssertNoErr(t, err) + + createdAt, err := time.Parse(time.RFC3339, createdAtString) + th.AssertNoErr(t, err) + + updatedAt, err := time.Parse(time.RFC3339, updatedAtString) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, members.Member{ + CreatedAt: createdAt, + ImageID: "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + MemberID: "8989447062e04a818baf9e073fd04fa7", + Schema: "/v2/schemas/member", + Status: "pending", + UpdatedAt: updatedAt, + }, *im) + +} + +func TestMemberListSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageMemberList(t) + + pager := members.List(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea") + t.Logf("Pager state %v", pager) + count, pages := 0, 0 + err := pager.EachPage(func(page pagination.Page) (bool, error) { + pages++ + t.Logf("Page %v", page) + members, err := members.ExtractMembers(page) + if err != nil { + return false, err + } + + for _, i := range members { + t.Logf("%s\t%s\t%s\t%s\t\n", i.ImageID, i.MemberID, i.Status, i.Schema) + count++ + } + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 1, pages) + th.AssertEquals(t, 2, count) +} + +func TestMemberListEmpty(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageMemberEmptyList(t) + + pager := members.List(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea") + t.Logf("Pager state %v", pager) + count, pages := 0, 0 + err := pager.EachPage(func(page pagination.Page) (bool, error) { + pages++ + t.Logf("Page %v", page) + members, err := members.ExtractMembers(page) + if err != nil { + return false, err + } + + for _, i := range members { + t.Logf("%s\t%s\t%s\t%s\t\n", i.ImageID, i.MemberID, i.Status, i.Schema) + count++ + } + + return true, nil + }) + + th.AssertNoErr(t, err) + th.AssertEquals(t, 0, pages) + th.AssertEquals(t, 0, count) +} + +func TestShowMemberDetails(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleImageMemberDetails(t) + md, err := members.Get(fakeclient.ServiceClient(), + "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "8989447062e04a818baf9e073fd04fa7").Extract() + + th.AssertNoErr(t, err) + if md == nil { + t.Errorf("Expected non-nil value for md") + } + + createdAt, err := time.Parse(time.RFC3339, "2013-11-26T07:21:21Z") + th.AssertNoErr(t, err) + + updatedAt, err := time.Parse(time.RFC3339, "2013-11-26T07:21:21Z") + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, members.Member{ + CreatedAt: createdAt, + ImageID: "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + MemberID: "8989447062e04a818baf9e073fd04fa7", + Schema: "/v2/schemas/member", + Status: "pending", + UpdatedAt: updatedAt, + }, *md) +} + +func TestDeleteMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + counter := HandleImageMemberDeleteSuccessfully(t) + + result := members.Delete(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "8989447062e04a818baf9e073fd04fa7") + th.AssertEquals(t, 1, counter.Counter) + th.AssertNoErr(t, result.Err) +} + +func TestMemberUpdateSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + counter := HandleImageMemberUpdate(t) + im, err := members.Update(fakeclient.ServiceClient(), "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + "8989447062e04a818baf9e073fd04fa7", + members.UpdateOpts{ + Status: "accepted", + 
}).Extract() + th.AssertEquals(t, 1, counter.Counter) + th.AssertNoErr(t, err) + + createdAt, err := time.Parse(time.RFC3339, "2013-11-26T07:21:21Z") + th.AssertNoErr(t, err) + + updatedAt, err := time.Parse(time.RFC3339, "2013-11-26T07:21:21Z") + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, members.Member{ + CreatedAt: createdAt, + ImageID: "da3b75d9-3f4a-40e7-8a2c-bfab23927dea", + MemberID: "8989447062e04a818baf9e073fd04fa7", + Schema: "/v2/schemas/member", + Status: "accepted", + UpdatedAt: updatedAt, + }, *im) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/urls.go new file mode 100644 index 000000000000..0898364e7d4c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/members/urls.go @@ -0,0 +1,31 @@ +package members + +import "github.com/gophercloud/gophercloud" + +func imageMembersURL(c *gophercloud.ServiceClient, imageID string) string { + return c.ServiceURL("images", imageID, "members") +} + +func listMembersURL(c *gophercloud.ServiceClient, imageID string) string { + return imageMembersURL(c, imageID) +} + +func createMemberURL(c *gophercloud.ServiceClient, imageID string) string { + return imageMembersURL(c, imageID) +} + +func imageMemberURL(c *gophercloud.ServiceClient, imageID string, memberID string) string { + return c.ServiceURL("images", imageID, "members", memberID) +} + +func getMemberURL(c *gophercloud.ServiceClient, imageID string, memberID string) string { + return imageMemberURL(c, imageID, memberID) +} + +func updateMemberURL(c *gophercloud.ServiceClient, imageID string, memberID string) string { + return imageMemberURL(c, imageID, memberID) +} + +func deleteMemberURL(c *gophercloud.ServiceClient, imageID string, memberID string) string { + return imageMemberURL(c, imageID, memberID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/doc.go new file mode 100644 index 000000000000..28ed82e55c0c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/doc.go @@ -0,0 +1,55 @@ +/* +Package tasks enables management and retrieval of tasks from the OpenStack +Imageservice. 
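+
+A task typically moves from "pending" through "processing" to a terminal
+"success" or "failure" status; see the TaskStatus constants in this package.
+
+Example to Wait for a Task to Complete (an illustrative sketch: the task ID,
+the polling interval and the error handling are editorial assumptions, not
+part of the upstream documentation)
+
+	taskID := "d550c87d-86ed-430a-9895-c7a1f5ce87e9"
+
+	for done := false; !done; {
+		task, err := tasks.Get(imagesClient, taskID).Extract()
+		if err != nil {
+			panic(err)
+		}
+
+		switch tasks.TaskStatus(task.Status) {
+		case tasks.TaskStatusSuccess, tasks.TaskStatusFailure:
+			fmt.Printf("%+v\n", task)
+			done = true
+		default:
+			time.Sleep(5 * time.Second)
+		}
+	}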
+ +Example to List Tasks + + listOpts := tasks.ListOpts{ + Owner: "424e7cf0243c468ca61732ba45973b3e", + } + + allPages, err := tasks.List(imagesClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allTasks, err := tasks.ExtractTasks(allPages) + if err != nil { + panic(err) + } + + for _, task := range allTasks { + fmt.Printf("%+v\n", task) + } + +Example to Get a Task + + task, err := tasks.Get(imagesClient, "1252f636-1246-4319-bfba-c47cde0efbe0").Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", task) + +Example to Create a Task + + createOpts := tasks.CreateOpts{ + Type: "import", + Input: map[string]interface{}{ + "image_properties": map[string]interface{}{ + "container_format": "bare", + "disk_format": "raw", + }, + "import_from_format": "raw", + "import_from": "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img", + }, + } + + task, err := tasks.Create(imagesClient, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", task) +*/ +package tasks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/requests.go new file mode 100644 index 000000000000..94fe45d9c5e2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/requests.go @@ -0,0 +1,128 @@ +package tasks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// TaskStatus represents valid task status. +// You can use this type to compare the actual status of a task to a one of the +// pre-defined statuses. +type TaskStatus string + +const ( + // TaskStatusPending represents status of the pending task. + TaskStatusPending TaskStatus = "pending" + + // TaskStatusProcessing represents status of the processing task. + TaskStatusProcessing TaskStatus = "processing" + + // TaskStatusSuccess represents status of the success task. + TaskStatusSuccess TaskStatus = "success" + + // TaskStatusFailure represents status of the failure task. + TaskStatusFailure TaskStatus = "failure" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToTaskListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the OpenStack Imageservice tasks API. +type ListOpts struct { + // Integer value for the limit of values to return. + Limit int `q:"limit"` + + // ID of the task at which you want to set a marker. + Marker string `q:"marker"` + + // SortDir allows to select sort direction. + // It can be "asc" or "desc" (default). + SortDir string `q:"sort_dir"` + + // SortKey allows to sort by one of the following tTask attributes: + // - created_at + // - expires_at + // - status + // - type + // - updated_at + // Default is created_at. + SortKey string `q:"sort_key"` + + // ID filters on the identifier of the task. + ID string `json:"id"` + + // Type filters on the type of the task. + Type string `json:"type"` + + // Status filters on the status of the task. + Status TaskStatus `q:"status"` +} + +// ToTaskListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToTaskListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of the tasks. 
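+// Unlike the AllPages example in the package documentation, the sketch below
+// walks the pages one at a time (illustrative only: imagesClient is a
+// placeholder and the status filter is an arbitrary choice):
+//
+//	pager := tasks.List(imagesClient, tasks.ListOpts{Status: tasks.TaskStatusProcessing})
+//	err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//		allTasks, err := tasks.ExtractTasks(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, task := range allTasks {
+//			fmt.Printf("%s is %s\n", task.ID, task.Status)
+//		}
+//		return true, nil
+//	})
+//	if err != nil {
+//		panic(err)
+//	}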
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToTaskListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + taskPage := TaskPage{ + serviceURL: c.ServiceURL(), + LinkedPageBase: pagination.LinkedPageBase{PageResult: r}, + } + + return taskPage + }) +} + +// Get retrieves a specific Imageservice task based on its ID. +func Get(c *gophercloud.ServiceClient, taskID string) (r GetResult) { + _, r.Err = c.Get(getURL(c, taskID), &r.Body, nil) + return +} + +// CreateOptsBuilder allows to add additional parameters to the Create request. +type CreateOptsBuilder interface { + ToTaskCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters of a new Imageservice task. +type CreateOpts struct { + Type string `json:"type" required:"true"` + Input map[string]interface{} `json:"input"` +} + +// ToTaskCreateMap constructs a request body from CreateOpts. +func (opts CreateOpts) ToTaskCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + return b, nil +} + +// Create requests the creation of a new Imageservice task on the server. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToTaskCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/results.go new file mode 100644 index 000000000000..3a7f5ca12b34 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/results.go @@ -0,0 +1,114 @@ +package tasks + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret it as a Task. +type GetResult struct { + commonResult +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret it as a Task. +type CreateResult struct { + commonResult +} + +// Task represents a single task of the OpenStack Image service. +type Task struct { + // ID is a unique identifier of the task. + ID string `json:"id"` + + // Type represents the type of the task. + Type string `json:"type"` + + // Status represents current status of the task. + // You can use the TaskStatus custom type to unmarshal raw JSON response into + // the pre-defined valid task status. + Status string `json:"status"` + + // Input represents different parameters for the task. + Input map[string]interface{} `json:"input"` + + // Result represents task result details. + Result map[string]interface{} `json:"result"` + + // Owner is a unique identifier of the task owner. + Owner string `json:"owner"` + + // Message represents human-readable message that is usually populated + // on task failure. + Message string `json:"message"` + + // ExpiresAt contains the timestamp of when the task will become a subject of + // removal. 
+ ExpiresAt time.Time `json:"expires_at"` + + // CreatedAt contains the task creation timestamp. + CreatedAt time.Time `json:"created_at"` + + // UpdatedAt contains the latest timestamp of when the task was updated. + UpdatedAt time.Time `json:"updated_at"` + + // Self contains URI for the task. + Self string `json:"self"` + + // Schema the path to the JSON-schema that represent the task. + Schema string `json:"schema"` +} + +// Extract interprets any commonResult as a Task. +func (r commonResult) Extract() (*Task, error) { + var s *Task + err := r.ExtractInto(&s) + return s, err +} + +// TaskPage represents the results of a List request. +type TaskPage struct { + serviceURL string + pagination.LinkedPageBase +} + +// IsEmpty returns true if a TaskPage contains no Tasks results. +func (r TaskPage) IsEmpty() (bool, error) { + tasks, err := ExtractTasks(r) + return len(tasks) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to +// the next page of results. +func (r TaskPage) NextPageURL() (string, error) { + var s struct { + Next string `json:"next"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + if s.Next == "" { + return "", nil + } + + return nextPageURL(r.serviceURL, s.Next) +} + +// ExtractTasks interprets the results of a single page from a List() call, +// producing a slice of Task entities. +func ExtractTasks(r pagination.Page) ([]Task, error) { + var s struct { + Tasks []Task `json:"tasks"` + } + err := (r.(TaskPage)).ExtractInto(&s) + return s.Tasks, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/doc.go new file mode 100644 index 000000000000..8d4a768447ec --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/doc.go @@ -0,0 +1,2 @@ +// tasks unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/fixtures.go new file mode 100644 index 000000000000..6a1a9d3e93d3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/fixtures.go @@ -0,0 +1,124 @@ +package testing + +import ( + "time" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks" +) + +// TasksListResult represents raw server response from a server to a list call. +const TasksListResult = ` +{ + "schema": "/v2/schemas/tasks", + "tasks": [ + { + "status": "pending", + "self": "/v2/tasks/1252f636-1246-4319-bfba-c47cde0efbe0", + "updated_at": "2018-07-25T08:59:14Z", + "id": "1252f636-1246-4319-bfba-c47cde0efbe0", + "owner": "424e7cf0243c468ca61732ba45973b3e", + "type": "import", + "created_at": "2018-07-25T08:59:13Z", + "schema": "/v2/schemas/task" + }, + { + "status": "processing", + "self": "/v2/tasks/349a51f4-d51d-47b6-82da-4fa516f0ca32", + "updated_at": "2018-07-25T08:56:19Z", + "id": "349a51f4-d51d-47b6-82da-4fa516f0ca32", + "owner": "fb57277ef2f84a0e85b9018ec2dedbf7", + "type": "import", + "created_at": "2018-07-25T08:56:17Z", + "schema": "/v2/schemas/task" + } + ], + "first": "/v2/tasks?sort_key=status&sort_dir=desc&limit=20" +} +` + +// Task1 is an expected representation of a first task from the TasksListResult. 
+var Task1 = tasks.Task{ + ID: "1252f636-1246-4319-bfba-c47cde0efbe0", + Status: string(tasks.TaskStatusPending), + Type: "import", + Owner: "424e7cf0243c468ca61732ba45973b3e", + CreatedAt: time.Date(2018, 7, 25, 8, 59, 13, 0, time.UTC), + UpdatedAt: time.Date(2018, 7, 25, 8, 59, 14, 0, time.UTC), + Self: "/v2/tasks/1252f636-1246-4319-bfba-c47cde0efbe0", + Schema: "/v2/schemas/task", +} + +// Task2 is an expected representation of a first task from the TasksListResult. +var Task2 = tasks.Task{ + ID: "349a51f4-d51d-47b6-82da-4fa516f0ca32", + Status: string(tasks.TaskStatusProcessing), + Type: "import", + Owner: "fb57277ef2f84a0e85b9018ec2dedbf7", + CreatedAt: time.Date(2018, 7, 25, 8, 56, 17, 0, time.UTC), + UpdatedAt: time.Date(2018, 7, 25, 8, 56, 19, 0, time.UTC), + Self: "/v2/tasks/349a51f4-d51d-47b6-82da-4fa516f0ca32", + Schema: "/v2/schemas/task", +} + +// TasksGetResult represents raw server response from a server to a get call. +const TasksGetResult = ` +{ + "status": "pending", + "created_at": "2018-07-25T08:59:13Z", + "updated_at": "2018-07-25T08:59:14Z", + "self": "/v2/tasks/1252f636-1246-4319-bfba-c47cde0efbe0", + "result": null, + "owner": "424e7cf0243c468ca61732ba45973b3e", + "input": { + "image_properties": { + "container_format": "bare", + "disk_format": "raw" + }, + "import_from_format": "raw", + "import_from": "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img" + }, + "message": "", + "type": "import", + "id": "1252f636-1246-4319-bfba-c47cde0efbe0", + "schema": "/v2/schemas/task" +} +` + +// TaskCreateRequest represents a request to create a task. +const TaskCreateRequest = ` +{ + "input": { + "image_properties": { + "container_format": "bare", + "disk_format": "raw" + }, + "import_from_format": "raw", + "import_from": "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img" + }, + "type": "import" +} +` + +// TaskCreateResult represents a raw server response to the TaskCreateRequest. 
+const TaskCreateResult = ` +{ + "status": "pending", + "created_at": "2018-07-25T11:07:54Z", + "updated_at": "2018-07-25T11:07:54Z", + "self": "/v2/tasks/d550c87d-86ed-430a-9895-c7a1f5ce87e9", + "result": null, + "owner": "fb57277ef2f84a0e85b9018ec2dedbf7", + "input": { + "image_properties": { + "container_format": "bare", + "disk_format": "raw" + }, + "import_from_format": "raw", + "import_from": "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img" + }, + "message": "", + "type": "import", + "id": "d550c87d-86ed-430a-9895-c7a1f5ce87e9", + "schema": "/v2/schemas/task" +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/requests_test.go new file mode 100644 index 000000000000..bc9af17aca1e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/testing/requests_test.go @@ -0,0 +1,138 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fakeclient "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/tasks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, TasksListResult) + }) + + count := 0 + + tasks.List(fakeclient.ServiceClient(), tasks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := tasks.ExtractTasks(page) + if err != nil { + t.Errorf("Failed to extract tasks: %v", err) + return false, nil + } + + expected := []tasks.Task{ + Task1, + Task2, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/tasks/1252f636-1246-4319-bfba-c47cde0efbe0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, TasksGetResult) + }) + + s, err := tasks.Get(fakeclient.ServiceClient(), "1252f636-1246-4319-bfba-c47cde0efbe0").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, string(tasks.TaskStatusPending)) + th.AssertEquals(t, s.CreatedAt, time.Date(2018, 7, 25, 8, 59, 13, 0, time.UTC)) + th.AssertEquals(t, s.UpdatedAt, time.Date(2018, 7, 25, 8, 59, 14, 0, time.UTC)) + th.AssertEquals(t, s.Self, "/v2/tasks/1252f636-1246-4319-bfba-c47cde0efbe0") + th.AssertEquals(t, s.Owner, "424e7cf0243c468ca61732ba45973b3e") + th.AssertEquals(t, s.Message, "") + th.AssertEquals(t, s.Type, "import") + th.AssertEquals(t, s.ID, "1252f636-1246-4319-bfba-c47cde0efbe0") + th.AssertEquals(t, s.Schema, "/v2/schemas/task") + th.AssertDeepEquals(t, s.Result, map[string]interface{}(nil)) + th.AssertDeepEquals(t, s.Input, map[string]interface{}{ + "import_from": "http://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img", + "import_from_format": "raw", + "image_properties": map[string]interface{}{ + 
"container_format": "bare", + "disk_format": "raw", + }, + }) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/tasks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fakeclient.TokenID) + th.TestJSONRequest(t, r, TaskCreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, TaskCreateResult) + }) + + opts := tasks.CreateOpts{ + Type: "import", + Input: map[string]interface{}{ + "image_properties": map[string]interface{}{ + "container_format": "bare", + "disk_format": "raw", + }, + "import_from_format": "raw", + "import_from": "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img", + }, + } + s, err := tasks.Create(fakeclient.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, string(tasks.TaskStatusPending)) + th.AssertEquals(t, s.CreatedAt, time.Date(2018, 7, 25, 11, 7, 54, 0, time.UTC)) + th.AssertEquals(t, s.UpdatedAt, time.Date(2018, 7, 25, 11, 7, 54, 0, time.UTC)) + th.AssertEquals(t, s.Self, "/v2/tasks/d550c87d-86ed-430a-9895-c7a1f5ce87e9") + th.AssertEquals(t, s.Owner, "fb57277ef2f84a0e85b9018ec2dedbf7") + th.AssertEquals(t, s.Message, "") + th.AssertEquals(t, s.Type, "import") + th.AssertEquals(t, s.ID, "d550c87d-86ed-430a-9895-c7a1f5ce87e9") + th.AssertEquals(t, s.Schema, "/v2/schemas/task") + th.AssertDeepEquals(t, s.Result, map[string]interface{}(nil)) + th.AssertDeepEquals(t, s.Input, map[string]interface{}{ + "import_from": "https://cloud-images.ubuntu.com/bionic/current/bionic-server-cloudimg-amd64.img", + "import_from_format": "raw", + "image_properties": map[string]interface{}{ + "container_format": "bare", + "disk_format": "raw", + }, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/urls.go new file mode 100644 index 000000000000..8133f383567a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/imageservice/v2/tasks/urls.go @@ -0,0 +1,55 @@ +package tasks + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/utils" +) + +const resourcePath = "tasks" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, taskID string) string { + return c.ServiceURL(resourcePath, taskID) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, taskID string) string { + return resourceURL(c, taskID) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func nextPageURL(serviceURL, requestedNext string) (string, error) { + base, err := utils.BaseEndpoint(serviceURL) + if err != nil { + return "", err + } + + requestedNextURL, err := url.Parse(requestedNext) + if err != nil { + return "", err + } + + base = gophercloud.NormalizeURL(base) + nextPath := base + strings.TrimPrefix(requestedNextURL.Path, "/") + + nextURL, err := url.Parse(nextPath) + if err != nil { + return "", err + } + + nextURL.RawQuery = requestedNextURL.RawQuery + + return nextURL.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/doc.go new 
file mode 100644
index 000000000000..d61940cb080d
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/doc.go
@@ -0,0 +1,54 @@
+/*
+Package acls manages ACLs in the OpenStack Key Manager Service.
+
+All functions have a Secret and Container equivalent.
+
+Example to Get a Secret's ACL
+
+	acl, err := acls.GetSecretACL(client, secretID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", acl)
+
+Example to Set a Secret's ACL
+
+	users := []string{"uuid", "uuid"}
+	iFalse := false
+	setOpts := acls.SetOpts{
+		Type:          "read",
+		Users:         &users,
+		ProjectAccess: &iFalse,
+	}
+
+	aclRef, err := acls.SetSecretACL(client, secretID, setOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", aclRef)
+
+Example to Update a Secret's ACL
+
+	users := []string{}
+	setOpts := acls.SetOpts{
+		Type:  "read",
+		Users: &users,
+	}
+
+	aclRef, err := acls.UpdateSecretACL(client, secretID, setOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", aclRef)
+
+Example to Delete a Secret's ACL
+
+	err := acls.DeleteSecretACL(client, secretID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package acls
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/requests.go
new file mode 100644
index 000000000000..b83c56075d02
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/requests.go
@@ -0,0 +1,112 @@
+package acls
+
+import (
+	"github.com/gophercloud/gophercloud"
+)
+
+// GetContainerACL retrieves the ACL of a container.
+func GetContainerACL(client *gophercloud.ServiceClient, containerID string) (r ACLResult) {
+	_, r.Err = client.Get(containerURL(client, containerID), &r.Body, nil)
+	return
+}
+
+// GetSecretACL retrieves the ACL of a secret.
+func GetSecretACL(client *gophercloud.ServiceClient, secretID string) (r ACLResult) {
+	_, r.Err = client.Get(secretURL(client, secretID), &r.Body, nil)
+	return
+}
+
+// SetOptsBuilder allows extensions to add additional parameters to the
+// Set request.
+type SetOptsBuilder interface {
+	ToACLSetMap() (map[string]interface{}, error)
+}
+
+// SetOpts represents options to set an ACL on a resource.
+type SetOpts struct {
+	// Type is the type of ACL to set, for example "read".
+	Type string `json:"-" required:"true"`
+
+	// Users are the list of Keystone user UUIDs.
+	Users *[]string `json:"users,omitempty"`
+
+	// ProjectAccess toggles whether all users in a project can access the resource.
+	ProjectAccess *bool `json:"project-access,omitempty"`
+}
+
+// ToACLSetMap formats a SetOpts into a set request.
+func (opts SetOpts) ToACLSetMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, opts.Type)
+}
+
+// SetContainerACL will set an ACL on a container.
+func SetContainerACL(client *gophercloud.ServiceClient, containerID string, opts SetOptsBuilder) (r ACLRefResult) {
+	b, err := opts.ToACLSetMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	_, r.Err = client.Put(containerURL(client, containerID), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// SetSecretACL will set an ACL on a secret.
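+// Note that the Set functions issue a PUT while the Update functions issue a
+// PATCH. A minimal usage sketch (client, secretID and the option values are
+// placeholders, not taken from the upstream documentation):
+//
+//	users := []string{"uuid"}
+//	iFalse := false
+//
+//	aclRef, err := acls.SetSecretACL(client, secretID, acls.SetOpts{
+//		Type:          "read",
+//		Users:         &users,
+//		ProjectAccess: &iFalse,
+//	}).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%v\n", aclRef)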
+func SetSecretACL(client *gophercloud.ServiceClient, secretID string, opts SetOptsBuilder) (r ACLRefResult) {
+ b, err := opts.ToACLSetMap()
+ if err != nil {
+  r.Err = err
+  return
+ }
+
+ _, r.Err = client.Put(secretURL(client, secretID), &b, &r.Body, &gophercloud.RequestOpts{
+  OkCodes: []int{200},
+ })
+ return
+}
+
+// UpdateContainerACL will update an ACL on a container.
+func UpdateContainerACL(client *gophercloud.ServiceClient, containerID string, opts SetOptsBuilder) (r ACLRefResult) {
+ b, err := opts.ToACLSetMap()
+ if err != nil {
+  r.Err = err
+  return
+ }
+
+ _, r.Err = client.Patch(containerURL(client, containerID), &b, &r.Body, &gophercloud.RequestOpts{
+  OkCodes: []int{200},
+ })
+ return
+}
+
+// UpdateSecretACL will update an ACL on a secret.
+func UpdateSecretACL(client *gophercloud.ServiceClient, secretID string, opts SetOptsBuilder) (r ACLRefResult) {
+ b, err := opts.ToACLSetMap()
+ if err != nil {
+  r.Err = err
+  return
+ }
+
+ _, r.Err = client.Patch(secretURL(client, secretID), &b, &r.Body, &gophercloud.RequestOpts{
+  OkCodes: []int{200},
+ })
+ return
+}
+
+// DeleteContainerACL will delete an ACL from a container.
+func DeleteContainerACL(client *gophercloud.ServiceClient, containerID string) (r DeleteResult) {
+ _, r.Err = client.Delete(containerURL(client, containerID), &gophercloud.RequestOpts{
+  OkCodes: []int{200},
+ })
+ return
+}
+
+// DeleteSecretACL will delete an ACL from a secret.
+func DeleteSecretACL(client *gophercloud.ServiceClient, secretID string) (r DeleteResult) {
+ _, r.Err = client.Delete(secretURL(client, secretID), &gophercloud.RequestOpts{
+  OkCodes: []int{200},
+ })
+ return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/results.go new file mode 100644 index 000000000000..959742da9524 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/results.go @@ -0,0 +1,85 @@ +package acls
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/gophercloud/gophercloud"
+)
+
+// ACL represents an ACL on a resource.
+type ACL map[string]ACLDetails
+
+// ACLDetails represents the details of an ACL.
+type ACLDetails struct {
+ // Created is when the ACL was created.
+ Created time.Time `json:"-"`
+
+ // ProjectAccess denotes project-level access of the resource.
+ ProjectAccess bool `json:"project-access"`
+
+ // Updated is when the ACL was updated.
+ Updated time.Time `json:"-"`
+
+ // Users are the UserIDs who have access to the resource.
+ Users []string `json:"users"`
+}
+
+func (r *ACLDetails) UnmarshalJSON(b []byte) error {
+ type tmp ACLDetails
+ var s struct {
+  tmp
+  Created gophercloud.JSONRFC3339NoZ `json:"created"`
+  Updated gophercloud.JSONRFC3339NoZ `json:"updated"`
+ }
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+  return err
+ }
+ *r = ACLDetails(s.tmp)
+
+ r.Created = time.Time(s.Created)
+ r.Updated = time.Time(s.Updated)
+
+ return nil
+}
+
+// ACLRef represents an ACL reference.
+type ACLRef string
+
+type commonResult struct {
+ gophercloud.Result
+}
+
+// Extract interprets any commonResult as an ACL.
+func (r commonResult) Extract() (*ACL, error) {
+ var s *ACL
+ err := r.ExtractInto(&s)
+ return s, err
+}
+
+// ACLResult is the response from a Get operation. Call its Extract method
+// to interpret it as an ACL.
+type ACLResult struct {
+ commonResult
+}
+
+// ACLRefResult is the response from a Set or Update operation.
Call its +// Extract method to interpret it as an ACLRef.
+type ACLRefResult struct {
+ gophercloud.Result
+}
+
+func (r ACLRefResult) Extract() (*ACLRef, error) {
+ var s struct {
+  ACLRef ACLRef `json:"acl_ref"`
+ }
+ err := r.ExtractInto(&s)
+ return &s.ACLRef, err
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr to
+// determine if the request succeeded or failed.
+type DeleteResult struct {
+ gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/testing/fixtures.go new file mode 100644 index 000000000000..7ae5b5971c92 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/testing/fixtures.go @@ -0,0 +1,168 @@ +package testing
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+ "time"
+
+ "github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls"
+ th "github.com/gophercloud/gophercloud/testhelper"
+ "github.com/gophercloud/gophercloud/testhelper/client"
+)
+
+const GetResponse = `
+{
+ "read": {
+  "created": "2018-06-22T17:54:24",
+  "project-access": false,
+  "updated": "2018-06-22T17:54:24",
+  "users": [
+   "GG27dVwR9gBMnsOaRoJ1DFJmZfdVjIdW"
+  ]
+ }
+}`
+
+const SetRequest = `
+{
+ "read": {
+  "project-access": false,
+  "users": [
+   "GG27dVwR9gBMnsOaRoJ1DFJmZfdVjIdW"
+  ]
+ }
+}`
+
+const SecretSetResponse = `
+{
+ "acl_ref": "http://barbican:9311/v1/secrets/4befede0-fbde-4480-982c-b160c1014a47/acl"
+}`
+
+const ContainerSetResponse = `
+{
+ "acl_ref": "http://barbican:9311/v1/containers/4befede0-fbde-4480-982c-b160c1014a47/acl"
+}`
+
+var ExpectedACL = acls.ACL{
+ "read": acls.ACLDetails{
+  Created: time.Date(2018, 6, 22, 17, 54, 24, 0, time.UTC),
+  ProjectAccess: false,
+  Updated: time.Date(2018, 6, 22, 17, 54, 24, 0, time.UTC),
+  Users: []string{
+   "GG27dVwR9gBMnsOaRoJ1DFJmZfdVjIdW",
+  },
+ },
+}
+
+var ExpectedSecretACLRef = acls.ACLRef("http://barbican:9311/v1/secrets/4befede0-fbde-4480-982c-b160c1014a47/acl")
+
+var ExpectedContainerACLRef = acls.ACLRef("http://barbican:9311/v1/containers/4befede0-fbde-4480-982c-b160c1014a47/acl")
+
+const UpdateRequest = `
+{
+ "read": {
+  "users": []
+ }
+}`
+
+// HandleGetSecretACLSuccessfully creates an HTTP handler at `/secrets/uuid/acl`
+// on the test handler mux that responds with an acl.
+func HandleGetSecretACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "GET")
+  th.TestHeader(t, r, "Accept", "application/json")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+  w.Header().Set("Content-Type", "application/json")
+  w.WriteHeader(http.StatusOK)
+  fmt.Fprintf(w, GetResponse)
+ })
+}
+
+// HandleGetContainerACLSuccessfully creates an HTTP handler at `/containers/uuid/acl`
+// on the test handler mux that responds with an acl.
+func HandleGetContainerACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/containers/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "GET")
+  th.TestHeader(t, r, "Accept", "application/json")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+  w.Header().Set("Content-Type", "application/json")
+  w.WriteHeader(http.StatusOK)
+  fmt.Fprintf(w, GetResponse)
+ })
+}
+
+// HandleSetSecretACLSuccessfully creates an HTTP handler at `/secrets/uuid/acl` on the
+// test handler mux that tests setting a secret's ACL.
+func HandleSetSecretACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "PUT")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+  th.TestJSONRequest(t, r, SetRequest)
+
+  w.WriteHeader(http.StatusOK)
+  fmt.Fprintf(w, SecretSetResponse)
+ })
+}
+
+// HandleSetContainerACLSuccessfully creates an HTTP handler at `/containers/uuid/acl` on the
+// test handler mux that tests setting a container's ACL.
+func HandleSetContainerACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/containers/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "PUT")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+  th.TestJSONRequest(t, r, SetRequest)
+
+  w.WriteHeader(http.StatusOK)
+  fmt.Fprintf(w, ContainerSetResponse)
+ })
+}
+
+// HandleUpdateSecretACLSuccessfully creates an HTTP handler at `/secrets/uuid/acl` on the
+// test handler mux that tests updating a secret's ACL.
+func HandleUpdateSecretACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "PATCH")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+  th.TestJSONRequest(t, r, UpdateRequest)
+
+  w.WriteHeader(http.StatusOK)
+  fmt.Fprintf(w, SecretSetResponse)
+ })
+}
+
+// HandleUpdateContainerACLSuccessfully creates an HTTP handler at `/containers/uuid/acl` on the
+// test handler mux that tests updating a container's ACL.
+func HandleUpdateContainerACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/containers/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "PATCH")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+  th.TestJSONRequest(t, r, UpdateRequest)
+
+  w.WriteHeader(http.StatusOK)
+  fmt.Fprintf(w, ContainerSetResponse)
+ })
+}
+
+// HandleDeleteSecretACLSuccessfully creates an HTTP handler at `/secrets/uuid/acl` on the
+// test handler mux that tests deleting a secret's ACL.
+func HandleDeleteSecretACLSuccessfully(t *testing.T) {
+ th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) {
+  th.TestMethod(t, r, "DELETE")
+  th.TestHeader(t, r, "X-Auth-Token", client.TokenID)
+
+  w.WriteHeader(http.StatusOK)
+ })
+}
+
+// HandleDeleteContainerACLSuccessfully creates an HTTP handler at `/containers/uuid/acl` on the
+// test handler mux that tests deleting a container's ACL.
+func HandleDeleteContainerACLSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/acl", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusOK) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/testing/requests_test.go new file mode 100644 index 000000000000..b978f405f100 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/testing/requests_test.go @@ -0,0 +1,115 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetSecretACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSecretACLSuccessfully(t) + + actual, err := acls.GetSecretACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedACL, *actual) +} + +func TestGetContainerACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetContainerACLSuccessfully(t) + + actual, err := acls.GetContainerACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedACL, *actual) +} + +func TestSetSecretACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleSetSecretACLSuccessfully(t) + + users := []string{"GG27dVwR9gBMnsOaRoJ1DFJmZfdVjIdW"} + iFalse := false + setOpts := acls.SetOpts{ + Type: "read", + Users: &users, + ProjectAccess: &iFalse, + } + + actual, err := acls.SetSecretACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", setOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedSecretACLRef, *actual) +} + +func TestSetContainerACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleSetContainerACLSuccessfully(t) + + users := []string{"GG27dVwR9gBMnsOaRoJ1DFJmZfdVjIdW"} + iFalse := false + setOpts := acls.SetOpts{ + Type: "read", + Users: &users, + ProjectAccess: &iFalse, + } + + actual, err := acls.SetContainerACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", setOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedContainerACLRef, *actual) +} + +func TestDeleteSecretACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSecretACLSuccessfully(t) + + res := acls.DeleteSecretACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c") + th.AssertNoErr(t, res.Err) +} + +func TestDeleteContainerACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteContainerACLSuccessfully(t) + + res := acls.DeleteContainerACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateSecretACL(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSecretACLSuccessfully(t) + + newUsers := []string{} + updateOpts := acls.SetOpts{ + Type: "read", + Users: &newUsers, + } + + actual, err := acls.UpdateSecretACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedSecretACLRef, *actual) +} + +func TestUpdateContainerACL(t *testing.T) { + th.SetupHTTP() 
+ defer th.TeardownHTTP() + HandleUpdateContainerACLSuccessfully(t) + + newUsers := []string{} + updateOpts := acls.SetOpts{ + Type: "read", + Users: &newUsers, + } + + actual, err := acls.UpdateContainerACL(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedContainerACLRef, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/urls.go new file mode 100644 index 000000000000..5cb4439a8c9a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/acls/urls.go @@ -0,0 +1,11 @@ +package acls + +import "github.com/gophercloud/gophercloud" + +func containerURL(client *gophercloud.ServiceClient, containerID string) string { + return client.ServiceURL("containers", containerID, "acl") +} + +func secretURL(client *gophercloud.ServiceClient, secretID string) string { + return client.ServiceURL("secrets", secretID, "acl") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/doc.go new file mode 100644 index 000000000000..cb11920f29ef --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/doc.go @@ -0,0 +1,86 @@ +/* +Package containers manages and retrieves containers in the OpenStack Key Manager +Service. + +Example to List Containers + + allPages, err := containers.List(client, nil).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractContainers(allPages) + if err != nil { + panic(err) + } + + for _, v := range allContainers { + fmt.Printf("%v\n", v) + } + +Example to Create a Container + + createOpts := containers.CreateOpts{ + Type: containers.GenericContainer, + Name: "mycontainer", + SecretRefs: []containers.SecretRef{ + { + Name: secret.Name, + SecretRef: secret.SecretRef, + }, + }, + } + + container, err := containers.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%v\n", container) + +Example to Delete a Container + + err := containers.Delete(client, containerID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List Consumers of a Container + + allPages, err := containers.ListConsumers(client, containerID, nil).AllPages() + if err != nil { + panic(err) + } + + allConsumers, err := containers.ExtractConsumers(allPages) + if err != nil { + panic(err) + } + + fmt.Printf("%v\n", allConsumers) + +Example to Create a Consumer of a Container + + createOpts := containers.CreateConsumerOpts{ + Name: "jdoe", + URL: "http://example.com", + } + + container, err := containers.CreateConsumer(client, containerID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Consumer of a Container + + deleteOpts := containers.DeleteConsumerOpts{ + Name: "jdoe", + URL: "http://example.com", + } + + container, err := containers.DeleteConsumer(client, containerID, deleteOpts).Extract() + if err != nil { + panic(err) + } +*/ +package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/requests.go new file mode 100644 index 000000000000..190438ed280a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/requests.go @@ 
-0,0 +1,212 @@ +package containers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ContainerType represents the valid types of containers. +type ContainerType string + +const ( + GenericContainer ContainerType = "generic" + RSAContainer ContainerType = "rsa" + CertificateContainer ContainerType = "certificate" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToContainerListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // Limit is the amount of containers to retrieve. + Limit int `q:"limit"` + + // Name is the name of the container + Name string `q:"name"` + + // Offset is the index within the list to retrieve. + Offset int `q:"offset"` +} + +// ToContainerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToContainerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List retrieves a list of containers. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToContainerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ContainerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves details of a container. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to +// the Create request. +type CreateOptsBuilder interface { + ToContainerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts provides options used to create a container. +type CreateOpts struct { + // Type represents the type of container. + Type ContainerType `json:"type" required:"true"` + + // Name is the name of the container. + Name string `json:"name"` + + // SecretRefs is a list of secret refs for the container. + SecretRefs []SecretRef `json:"secret_refs"` +} + +// ToContainerCreateMap formats a CreateOpts into a create request. +func (opts CreateOpts) ToContainerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create creates a new container. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToContainerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// Delete deletes a container. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListConsumersOptsBuilder allows extensions to add additional parameters to +// the ListConsumers request +type ListConsumersOptsBuilder interface { + ToContainerListConsumersQuery() (string, error) +} + +// ListConsumersOpts provides options to filter the List results. +type ListConsumersOpts struct { + // Limit is the amount of consumers to retrieve. + Limit int `q:"limit"` + + // Offset is the index within the list to retrieve. + Offset int `q:"offset"` +} + +// ToContainerListConsumersQuery formats a ListConsumersOpts into a query +// string. 
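+// For example (illustrative values), ListConsumersOpts{Limit: 5, Offset: 10}
+// produces the query string "?limit=5&offset=10", which ListConsumers appends
+// to the consumers URL.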
+func (opts ListConsumersOpts) ToContainerListConsumersQuery() (string, error) {
+ q, err := gophercloud.BuildQueryString(opts)
+ return q.String(), err
+}
+
+// ListConsumers retrieves a list of consumers from a container.
+func ListConsumers(client *gophercloud.ServiceClient, containerID string, opts ListConsumersOptsBuilder) pagination.Pager {
+ url := listConsumersURL(client, containerID)
+ if opts != nil {
+  query, err := opts.ToContainerListConsumersQuery()
+  if err != nil {
+   return pagination.Pager{Err: err}
+  }
+  url += query
+ }
+ return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+  return ConsumerPage{pagination.LinkedPageBase{PageResult: r}}
+ })
+}
+
+// CreateConsumerOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type CreateConsumerOptsBuilder interface {
+ ToContainerConsumerCreateMap() (map[string]interface{}, error)
+}
+
+// CreateConsumerOpts provides options used to create a consumer of a container.
+type CreateConsumerOpts struct {
+ // Name is the name of the consumer.
+ Name string `json:"name"`
+
+ // URL is the URL to the consumer resource.
+ URL string `json:"URL"`
+}
+
+// ToContainerConsumerCreateMap formats a CreateConsumerOpts into a create
+// request.
+func (opts CreateConsumerOpts) ToContainerConsumerCreateMap() (map[string]interface{}, error) {
+ return gophercloud.BuildRequestBody(opts, "")
+}
+
+// CreateConsumer creates a new consumer.
+func CreateConsumer(client *gophercloud.ServiceClient, containerID string, opts CreateConsumerOptsBuilder) (r CreateConsumerResult) {
+ b, err := opts.ToContainerConsumerCreateMap()
+ if err != nil {
+  r.Err = err
+  return
+ }
+ _, r.Err = client.Post(createConsumerURL(client, containerID), &b, &r.Body, &gophercloud.RequestOpts{
+  OkCodes: []int{200},
+ })
+ return
+}
+
+// DeleteConsumerOptsBuilder allows extensions to add additional parameters to
+// the Delete request.
+type DeleteConsumerOptsBuilder interface {
+ ToContainerConsumerDeleteMap() (map[string]interface{}, error)
+}
+
+// DeleteConsumerOpts represents options used for deleting a consumer.
+type DeleteConsumerOpts struct {
+ // Name is the name of the consumer.
+ Name string `json:"name"`
+
+ // URL is the URL to the consumer resource.
+ URL string `json:"URL"`
+}
+
+// ToContainerConsumerDeleteMap formats a DeleteConsumerOpts into a delete
+// request.
+func (opts DeleteConsumerOpts) ToContainerConsumerDeleteMap() (map[string]interface{}, error) {
+ return gophercloud.BuildRequestBody(opts, "")
+}
+
+// DeleteConsumer deletes a consumer.
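+// The consumer to remove is identified by a JSON body rather than by the URL.
+// A minimal sketch, assuming an authenticated service client and an existing
+// containerID:
+//
+//    deleteOpts := containers.DeleteConsumerOpts{
+//        Name: "jdoe",
+//        URL:  "http://example.com",
+//    }
+//    container, err := containers.DeleteConsumer(client, containerID, deleteOpts).Extract()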
+func DeleteConsumer(client *gophercloud.ServiceClient, containerID string, opts DeleteConsumerOptsBuilder) (r DeleteConsumerResult) { + url := deleteConsumerURL(client, containerID) + + b, err := opts.ToContainerConsumerDeleteMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Request("DELETE", url, &gophercloud.RequestOpts{ + JSONBody: b, + JSONResponse: &r.Body, + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/results.go new file mode 100644 index 000000000000..c94c9ac67108 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/results.go @@ -0,0 +1,232 @@ +package containers + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Container represents a container in the key manager service. +type Container struct { + // Consumers are the consumers of the container. + Consumers []ConsumerRef `json:"consumers"` + + // ContainerRef is the URL to the container + ContainerRef string `json:"container_ref"` + + // Created is the date the container was created. + Created time.Time `json:"-"` + + // CreatorID is the creator of the container. + CreatorID string `json:"creator_id"` + + // Name is the name of the container. + Name string `json:"name"` + + // SecretRefs are the secret references of the container. + SecretRefs []SecretRef `json:"secret_refs"` + + // Status is the status of the container. + Status string `json:"status"` + + // Type is the type of container. + Type string `json:"type"` + + // Updated is the date the container was updated. + Updated time.Time `json:"-"` +} + +func (r *Container) UnmarshalJSON(b []byte) error { + type tmp Container + var s struct { + tmp + Created gophercloud.JSONRFC3339NoZ `json:"created"` + Updated gophercloud.JSONRFC3339NoZ `json:"updated"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Container(s.tmp) + + r.Created = time.Time(s.Created) + r.Updated = time.Time(s.Updated) + + return nil +} + +// ConsumerRef represents a consumer reference in a container. +type ConsumerRef struct { + // Name is the name of the consumer. + Name string `json:"name"` + + // URL is the URL to the consumer resource. + URL string `json:"url"` +} + +// SecretRef is a reference to a secret. +type SecretRef struct { + SecretRef string `json:"secret_ref"` + Name string `json:"name"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract interprets any commonResult as a Container. +func (r commonResult) Extract() (*Container, error) { + var s *Container + err := r.ExtractInto(&s) + return s, err +} + +// GetResult is the response from a Get operation. Call its Extract method +// to interpret it as a container. +type GetResult struct { + commonResult +} + +// CreateResult is the response from a Create operation. Call its Extract method +// to interpret it as a container. +type CreateResult struct { + commonResult +} + +// DeleteResult is the response from a Delete operation. Call its ExtractErr to +// determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ContainerPage is a single page of container results. +type ContainerPage struct { + pagination.LinkedPageBase +} + +// IsEmpty determines whether or not a page of Container contains any results. 
+func (r ContainerPage) IsEmpty() (bool, error) {
+ containers, err := ExtractContainers(r)
+ return len(containers) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r ContainerPage) NextPageURL() (string, error) {
+ var s struct {
+  Next string `json:"next"`
+  Previous string `json:"previous"`
+ }
+ err := r.ExtractInto(&s)
+ if err != nil {
+  return "", err
+ }
+ return s.Next, err
+}
+
+// ExtractContainers returns a slice of Containers contained in a single page of
+// results.
+func ExtractContainers(r pagination.Page) ([]Container, error) {
+ var s struct {
+  Containers []Container `json:"containers"`
+ }
+ err := (r.(ContainerPage)).ExtractInto(&s)
+ return s.Containers, err
+}
+
+// Consumer represents a consumer in a container.
+type Consumer struct {
+ // Created is the date the consumer was created.
+ Created time.Time `json:"-"`
+
+ // Name is the name of the consumer.
+ Name string `json:"name"`
+
+ // Status is the status of the consumer.
+ Status string `json:"status"`
+
+ // Updated is the date the consumer was updated.
+ Updated time.Time `json:"-"`
+
+ // URL is the url to the consumer.
+ URL string `json:"url"`
+}
+
+func (r *Consumer) UnmarshalJSON(b []byte) error {
+ type tmp Consumer
+ var s struct {
+  tmp
+  Created gophercloud.JSONRFC3339NoZ `json:"created"`
+  Updated gophercloud.JSONRFC3339NoZ `json:"updated"`
+ }
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+  return err
+ }
+ *r = Consumer(s.tmp)
+
+ r.Created = time.Time(s.Created)
+ r.Updated = time.Time(s.Updated)
+
+ return nil
+}
+
+type consumerResult struct {
+ gophercloud.Result
+}
+
+// Extract interprets any consumerResult as a Consumer.
+func (r consumerResult) Extract() (*Consumer, error) {
+ var s *Consumer
+ err := r.ExtractInto(&s)
+ return s, err
+}
+
+// CreateConsumerResult is the response from a CreateConsumer operation.
+// Call its Extract method to interpret it as a container.
+type CreateConsumerResult struct {
+ // This is not a typo.
+ commonResult
+}
+
+// DeleteConsumerResult is the response from a DeleteConsumer operation.
+// Call its Extract method to interpret it as a container.
+type DeleteConsumerResult struct {
+ // This is not a typo.
+ commonResult
+}
+
+// ConsumerPage is a single page of consumer results.
+type ConsumerPage struct {
+ pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of consumers contains any results.
+func (r ConsumerPage) IsEmpty() (bool, error) {
+ consumers, err := ExtractConsumers(r)
+ return len(consumers) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r ConsumerPage) NextPageURL() (string, error) {
+ var s struct {
+  Next string `json:"next"`
+  Previous string `json:"previous"`
+ }
+ err := r.ExtractInto(&s)
+ if err != nil {
+  return "", err
+ }
+ return s.Next, err
+}
+
+// ExtractConsumers returns a slice of Consumers contained in a single page of
+// results.
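+// A typical use, assuming an authenticated service client and an existing
+// containerID, is to collect all pages first and then extract them:
+//
+//    allPages, err := containers.ListConsumers(client, containerID, nil).AllPages()
+//    if err != nil {
+//        panic(err)
+//    }
+//    consumers, err := containers.ExtractConsumers(allPages)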
+func ExtractConsumers(r pagination.Page) ([]Consumer, error) { + var s struct { + Consumers []Consumer `json:"consumers"` + } + err := (r.(ConsumerPage)).ExtractInto(&s) + return s.Consumers, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/testing/fixtures.go new file mode 100644 index 000000000000..3c73e38b1ce7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/testing/fixtures.go @@ -0,0 +1,321 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListResponse provides a single page of container results. +const ListResponse = ` +{ + "containers": [ + { + "consumers": [], + "container_ref": "http://barbican:9311/v1/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", + "created": "2018-06-21T21:28:37", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "name": "mycontainer", + "secret_refs": [ + { + "name": "mysecret", + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c" + } + ], + "status": "ACTIVE", + "type": "generic", + "updated": "2018-06-21T21:28:37" + }, + { + "consumers": [], + "container_ref": "http://barbican:9311/v1/containers/47b20e73-335b-4867-82dc-3796524d5e20", + "created": "2018-06-21T21:30:09", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "name": "anothercontainer", + "secret_refs": [ + { + "name": "another", + "secret_ref": "http://barbican:9311/v1/secrets/1b12b69a-8822-442e-a303-da24ade648ac" + } + ], + "status": "ACTIVE", + "type": "generic", + "updated": "2018-06-21T21:30:09" + } + ], + "total": 2 +}` + +// GetResponse provides a Get result. +const GetResponse = ` +{ + "consumers": [], + "container_ref": "http://barbican:9311/v1/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", + "created": "2018-06-21T21:28:37", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "name": "mycontainer", + "secret_refs": [ + { + "name": "mysecret", + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c" + } + ], + "status": "ACTIVE", + "type": "generic", + "updated": "2018-06-21T21:28:37" +}` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "name": "mycontainer", + "secret_refs": [ + { + "name": "mysecret", + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c" + } + ], + "type": "generic" +}` + +// CreateResponse is the response of a Create request. +const CreateResponse = ` +{ + "consumers": [], + "container_ref": "http://barbican:9311/v1/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", + "created": "2018-06-21T21:28:37", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "name": "mycontainer", + "secret_refs": [ + { + "name": "mysecret", + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c" + } + ], + "status": "ACTIVE", + "type": "generic", + "updated": "2018-06-21T21:28:37" +}` + +// FirstContainer is the first resource in the List request. 
+var FirstContainer = containers.Container{ + Consumers: []containers.ConsumerRef{}, + ContainerRef: "http://barbican:9311/v1/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", + Created: time.Date(2018, 6, 21, 21, 28, 37, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Name: "mycontainer", + SecretRefs: []containers.SecretRef{ + { + Name: "mysecret", + SecretRef: "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", + }, + }, + Status: "ACTIVE", + Type: "generic", + Updated: time.Date(2018, 6, 21, 21, 28, 37, 0, time.UTC), +} + +// SecondContainer is the second resource in the List request. +var SecondContainer = containers.Container{ + Consumers: []containers.ConsumerRef{}, + ContainerRef: "http://barbican:9311/v1/containers/47b20e73-335b-4867-82dc-3796524d5e20", + Created: time.Date(2018, 6, 21, 21, 30, 9, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Name: "anothercontainer", + SecretRefs: []containers.SecretRef{ + { + Name: "another", + SecretRef: "http://barbican:9311/v1/secrets/1b12b69a-8822-442e-a303-da24ade648ac", + }, + }, + Status: "ACTIVE", + Type: "generic", + Updated: time.Date(2018, 6, 21, 21, 30, 9, 0, time.UTC), +} + +// ExpectedContainersSlice is the slice of containers expected to be returned from ListResponse. +var ExpectedContainersSlice = []containers.Container{FirstContainer, SecondContainer} + +const ListConsumersResponse = ` +{ + "consumers": [ + { + "URL": "http://example.com", + "created": "2018-06-22T16:26:25", + "name": "CONSUMER-LZILN1zq", + "status": "ACTIVE", + "updated": "2018-06-22T16:26:25" + } + ], + "total": 1 +}` + +// ExpectedConsumer is the expected result of a consumer retrieval. +var ExpectedConsumer = containers.Consumer{ + URL: "http://example.com", + Created: time.Date(2018, 6, 22, 16, 26, 25, 0, time.UTC), + Name: "CONSUMER-LZILN1zq", + Status: "ACTIVE", + Updated: time.Date(2018, 6, 22, 16, 26, 25, 0, time.UTC), +} + +// ExpectedConsumersSlice is an expected slice of consumers. +var ExpectedConsumersSlice = []containers.Consumer{ExpectedConsumer} + +const CreateConsumerRequest = ` +{ + "URL": "http://example.com", + "name": "CONSUMER-LZILN1zq" +}` + +const CreateConsumerResponse = ` +{ + "consumers": [ + { + "URL": "http://example.com", + "name": "CONSUMER-LZILN1zq" + } + ], + "container_ref": "http://barbican:9311/v1/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", + "created": "2018-06-21T21:28:37", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "name": "mycontainer", + "secret_refs": [ + { + "name": "mysecret", + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c" + } + ], + "status": "ACTIVE", + "type": "generic", + "updated": "2018-06-21T21:28:37" +}` + +// ExpectedCreatedConsumer is the expected result of adding a consumer. 
+var ExpectedCreatedConsumer = containers.Container{ + Consumers: []containers.ConsumerRef{ + { + Name: "CONSUMER-LZILN1zq", + URL: "http://example.com", + }, + }, + ContainerRef: "http://barbican:9311/v1/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", + Created: time.Date(2018, 6, 21, 21, 28, 37, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Name: "mycontainer", + SecretRefs: []containers.SecretRef{ + { + Name: "mysecret", + SecretRef: "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", + }, + }, + Status: "ACTIVE", + Type: "generic", + Updated: time.Date(2018, 6, 21, 21, 28, 37, 0, time.UTC), +} + +const DeleteConsumerRequest = ` +{ + "URL": "http://example.com", + "name": "CONSUMER-LZILN1zq" +}` + +// HandleListContainersSuccessfully creates an HTTP handler at `/containers` on the +// test handler mux that responds with a list of two containers. +func HandleListContainersSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListResponse) + }) +} + +// HandleGetContainerSuccessfully creates an HTTP handler at `/containers` on the +// test handler mux that responds with a single resource. +func HandleGetContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetResponse) + }) +} + +// HandleCreateContainerSuccessfully creates an HTTP handler at `/containers` on the +// test handler mux that tests resource creation. +func HandleCreateContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, GetResponse) + }) +} + +// HandleDeleteContainerSuccessfully creates an HTTP handler at `/containers` on the +// test handler mux that tests resource deletion. +func HandleDeleteContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleListConsumersSuccessfully creates an HTTP handler at +// `/containers/uuid/consumers` on the test handler mux that responds with +// a list of consumers. 
+func HandleListConsumersSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0/consumers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListConsumersResponse) + }) +} + +// HandleCreateConsumerSuccessfully creates an HTTP handler at +// `/containers/uuid/consumers` on the test handler mux that tests resource +// creation. +func HandleCreateConsumerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0/consumers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateConsumerRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, CreateConsumerResponse) + }) +} + +// HandleDeleteConsumerSuccessfully creates an HTTP handler at +// `/containers/uuid/consumers` on the test handler mux that tests resource +// deletion. +func HandleDeleteConsumerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/containers/dfdb88f3-4ddb-4525-9da6-066453caa9b0/consumers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateConsumerRequest) + + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetResponse) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/testing/requests_test.go new file mode 100644 index 000000000000..2f405e3c55d5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/testing/requests_test.go @@ -0,0 +1,144 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListContainers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainersSuccessfully(t) + + count := 0 + err := containers.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := containers.ExtractContainers(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedContainersSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestListContainersAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainersSuccessfully(t) + + allPages, err := containers.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := containers.ExtractContainers(allPages) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedContainersSlice, actual) +} + +func TestGetContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetContainerSuccessfully(t) + + actual, err := containers.Get(client.ServiceClient(), "dfdb88f3-4ddb-4525-9da6-066453caa9b0").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, FirstContainer, *actual) +} + +func TestCreateContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateContainerSuccessfully(t) + + createOpts := 
containers.CreateOpts{ + Type: containers.GenericContainer, + Name: "mycontainer", + SecretRefs: []containers.SecretRef{ + { + Name: "mysecret", + SecretRef: "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", + }, + }, + } + + actual, err := containers.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, FirstContainer, *actual) +} + +func TestDeleteContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteContainerSuccessfully(t) + + res := containers.Delete(client.ServiceClient(), "dfdb88f3-4ddb-4525-9da6-066453caa9b0") + th.AssertNoErr(t, res.Err) +} + +func TestListConsumers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListConsumersSuccessfully(t) + + count := 0 + err := containers.ListConsumers(client.ServiceClient(), "dfdb88f3-4ddb-4525-9da6-066453caa9b0", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := containers.ExtractConsumers(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedConsumersSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestListConsumersAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListConsumersSuccessfully(t) + + allPages, err := containers.ListConsumers(client.ServiceClient(), "dfdb88f3-4ddb-4525-9da6-066453caa9b0", nil).AllPages() + th.AssertNoErr(t, err) + actual, err := containers.ExtractConsumers(allPages) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedConsumersSlice, actual) +} + +func TestCreateConsumer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateConsumerSuccessfully(t) + + createOpts := containers.CreateConsumerOpts{ + Name: "CONSUMER-LZILN1zq", + URL: "http://example.com", + } + + actual, err := containers.CreateConsumer(client.ServiceClient(), "dfdb88f3-4ddb-4525-9da6-066453caa9b0", createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCreatedConsumer, *actual) +} + +func TestDeleteConsumer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteConsumerSuccessfully(t) + + deleteOpts := containers.DeleteConsumerOpts{ + Name: "CONSUMER-LZILN1zq", + URL: "http://example.com", + } + + actual, err := containers.DeleteConsumer(client.ServiceClient(), "dfdb88f3-4ddb-4525-9da6-066453caa9b0", deleteOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, FirstContainer, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/urls.go new file mode 100644 index 000000000000..236ebc8133d2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/containers/urls.go @@ -0,0 +1,31 @@ +package containers + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("containers") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("containers", id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("containers") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("containers", id) +} + +func listConsumersURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("containers", id, "consumers") +} + +func createConsumerURL(client 
*gophercloud.ServiceClient, id string) string {
+ return client.ServiceURL("containers", id, "consumers")
+}
+
+func deleteConsumerURL(client *gophercloud.ServiceClient, id string) string {
+ return client.ServiceURL("containers", id, "consumers")
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/doc.go new file mode 100644 index 000000000000..fcbeccf4e50b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/doc.go @@ -0,0 +1,45 @@ +/*
+Package orders manages and retrieves orders in the OpenStack Key Manager
+Service.
+
+Example to List Orders
+
+ allPages, err := orders.List(client, nil).AllPages()
+ if err != nil {
+  panic(err)
+ }
+
+ allOrders, err := orders.ExtractOrders(allPages)
+ if err != nil {
+  panic(err)
+ }
+
+ fmt.Printf("%v\n", allOrders)
+
+Example to Create an Order
+
+ createOpts := orders.CreateOpts{
+  Type: orders.KeyOrder,
+  Meta: orders.MetaOpts{
+   Name: "order-name",
+   Algorithm: "aes",
+   BitLength: 256,
+   Mode: "cbc",
+  },
+ }
+
+ order, err := orders.Create(client, createOpts).Extract()
+ if err != nil {
+  panic(err)
+ }
+
+ fmt.Printf("%v\n", order)
+
+Example to Delete an Order
+
+ err := orders.Delete(client, orderID).ExtractErr()
+ if err != nil {
+  panic(err)
+ }
+*/
+package orders
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/requests.go new file mode 100644 index 000000000000..7a55f9ddb813 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/requests.go @@ -0,0 +1,129 @@ +package orders
+
+import (
+ "time"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/pagination"
+)
+
+// OrderType represents the valid types of orders.
+type OrderType string
+
+const (
+ KeyOrder OrderType = "key"
+ AsymmetricOrder OrderType = "asymmetric"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to
+// the List request
+type ListOptsBuilder interface {
+ ToOrderListQuery() (string, error)
+}
+
+// ListOpts provides options to filter the List results.
+type ListOpts struct {
+ // Limit is the amount of orders to retrieve.
+ Limit int `q:"limit"`
+
+ // Offset is the index within the list to retrieve.
+ Offset int `q:"offset"`
+}
+
+// ToOrderListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToOrderListQuery() (string, error) {
+ q, err := gophercloud.BuildQueryString(opts)
+ return q.String(), err
+}
+
+// List retrieves a list of orders.
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+ url := listURL(client)
+ if opts != nil {
+  query, err := opts.ToOrderListQuery()
+  if err != nil {
+   return pagination.Pager{Err: err}
+  }
+  url += query
+ }
+ return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+  return OrderPage{pagination.LinkedPageBase{PageResult: r}}
+ })
+}
+
+// Get retrieves details of an order.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+ _, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+ return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type CreateOptsBuilder interface {
+ ToOrderCreateMap() (map[string]interface{}, error)
+}
+
+// MetaOpts represents options used for creating an order.
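+// For example (illustrative values), the metadata for a 256-bit AES key order
+// could be expressed as:
+//
+//    meta := orders.MetaOpts{
+//        Algorithm: "aes",
+//        BitLength: 256,
+//        Mode:      "cbc",
+//    }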
+type MetaOpts struct {
+ // Algorithm is the algorithm of the secret.
+ Algorithm string `json:"algorithm"`
+
+ // BitLength is the bit length of the secret.
+ BitLength int `json:"bit_length"`
+
+ // Expiration is the expiration date of the order.
+ Expiration *time.Time `json:"-"`
+
+ // Mode is the mode of the secret.
+ Mode string `json:"mode"`
+
+ // Name is the name of the secret.
+ Name string `json:"name,omitempty"`
+
+ // PayloadContentType is the content type of the secret payload.
+ PayloadContentType string `json:"payload_content_type,omitempty"`
+}
+
+// CreateOpts provides options used to create an order.
+type CreateOpts struct {
+ // Type is the type of order to create.
+ Type OrderType `json:"type"`
+
+ // Meta contains secret data used to create the secret.
+ Meta MetaOpts `json:"meta"`
+}
+
+// ToOrderCreateMap formats a CreateOpts into a create request.
+func (opts CreateOpts) ToOrderCreateMap() (map[string]interface{}, error) {
+ b, err := gophercloud.BuildRequestBody(opts, "")
+ if err != nil {
+  return nil, err
+ }
+
+ if opts.Meta.Expiration != nil {
+  meta := b["meta"].(map[string]interface{})
+  meta["expiration"] = opts.Meta.Expiration.Format(gophercloud.RFC3339NoZ)
+  b["meta"] = meta
+ }
+
+ return b, nil
+}
+
+// Create creates a new order.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+ b, err := opts.ToOrderCreateMap()
+ if err != nil {
+  r.Err = err
+  return
+ }
+ _, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{
+  OkCodes: []int{202},
+ })
+ return
+}
+
+// Delete deletes an order.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+ _, r.Err = client.Delete(deleteURL(client, id), nil)
+ return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/results.go new file mode 100644 index 000000000000..35e89b50ba56 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/results.go @@ -0,0 +1,170 @@ +package orders
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/pagination"
+)
+
+// Order represents an order in the key manager service.
+type Order struct {
+ // ContainerRef is the container URL.
+ ContainerRef string `json:"container_ref"`
+
+ // Created is when the order was created.
+ Created time.Time `json:"-"`
+
+ // CreatorID is the creator of the order.
+ CreatorID string `json:"creator_id"`
+
+ // ErrorReason is the reason of the error.
+ ErrorReason string `json:"error_reason"`
+
+ // ErrorStatusCode is the error status code.
+ ErrorStatusCode string `json:"error_status_code"`
+
+ // OrderRef is the order URL.
+ OrderRef string `json:"order_ref"`
+
+ // Meta is secret data about the order.
+ Meta Meta `json:"meta"`
+
+ // SecretRef is the secret URL.
+ SecretRef string `json:"secret_ref"`
+
+ // Status is the status of the order.
+ Status string `json:"status"`
+
+ // SubStatus is the sub status of the order.
+ SubStatus string `json:"sub_status"`
+
+ // SubStatusMessage is the message of the sub status.
+ SubStatusMessage string `json:"sub_status_message"`
+
+ // Type is the order type.
+ Type string `json:"type"`
+
+ // Updated is when the order was updated.
+ Updated time.Time `json:"-"`
+}
+
+func (r *Order) UnmarshalJSON(b []byte) error {
+ type tmp Order
+ var s struct {
+  tmp
+  Created gophercloud.JSONRFC3339NoZ `json:"created"`
+  Updated gophercloud.JSONRFC3339NoZ `json:"updated"`
+ }
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+  return err
+ }
+ *r = Order(s.tmp)
+
+ r.Created = time.Time(s.Created)
+ r.Updated = time.Time(s.Updated)
+
+ return nil
+}
+
+type Meta struct {
+ // Algorithm is the algorithm of the secret.
+ Algorithm string `json:"algorithm"`
+
+ // BitLength is the bit length of the secret.
+ BitLength int `json:"bit_length"`
+
+ // Expiration is the expiration date of the order.
+ Expiration time.Time `json:"-"`
+
+ // Mode is the mode of the secret.
+ Mode string `json:"mode"`
+
+ // Name is the name of the secret.
+ Name string `json:"name"`
+
+ // PayloadContentType is the content type of the secret payload.
+ PayloadContentType string `json:"payload_content_type"`
+}
+
+func (r *Meta) UnmarshalJSON(b []byte) error {
+ type tmp Meta
+ var s struct {
+  tmp
+  Expiration gophercloud.JSONRFC3339NoZ `json:"expiration"`
+ }
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+  return err
+ }
+ *r = Meta(s.tmp)
+
+ r.Expiration = time.Time(s.Expiration)
+
+ return nil
+}
+
+type commonResult struct {
+ gophercloud.Result
+}
+
+// GetResult is the response from a Get operation. Call its Extract method
+// to interpret it as an order.
+type GetResult struct {
+ commonResult
+}
+
+// CreateResult is the response from a Create operation. Call its Extract method
+// to interpret it as an order.
+type CreateResult struct {
+ commonResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr to
+// determine if the request succeeded or failed.
+type DeleteResult struct {
+ gophercloud.ErrResult
+}
+
+// OrderPage is a single page of orders results.
+type OrderPage struct {
+ pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of orders contains any results.
+func (r OrderPage) IsEmpty() (bool, error) {
+ orders, err := ExtractOrders(r)
+ return len(orders) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r OrderPage) NextPageURL() (string, error) {
+ var s struct {
+  Next string `json:"next"`
+  Previous string `json:"previous"`
+ }
+ err := r.ExtractInto(&s)
+ if err != nil {
+  return "", err
+ }
+ return s.Next, err
+}
+
+// ExtractOrders returns a slice of Orders contained in a single page of
+// results.
+func ExtractOrders(r pagination.Page) ([]Order, error) {
+ var s struct {
+  Orders []Order `json:"orders"`
+ }
+ err := (r.(OrderPage)).ExtractInto(&s)
+ return s.Orders, err
+}
+
+// Extract interprets any commonResult as an Order.
+func (r commonResult) Extract() (*Order, error) { + var s *Order + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/testing/fixtures.go new file mode 100644 index 000000000000..bdab5427b0e2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/testing/fixtures.go @@ -0,0 +1,186 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListResponse provides a single page of RESOURCE results. +const ListResponse = ` +{ + "orders": [ + { + "created": "2018-06-22T05:05:43", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "meta": { + "algorithm": "aes", + "bit_length": 256, + "expiration": null, + "mode": "cbc", + "name": null, + "payload_content_type": "application/octet-stream" + }, + "order_ref": "http://barbican:9311/v1/orders/46f73695-82bb-447a-bf96-6635f0fb0ce7", + "secret_ref": "http://barbican:9311/v1/secrets/22dfef44-1046-4549-a86d-95af462e8fa0", + "status": "ACTIVE", + "sub_status": "Unknown", + "sub_status_message": "Unknown", + "type": "key", + "updated": "2018-06-22T05:05:43" + }, + { + "created": "2018-06-22T05:08:15", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "meta": { + "algorithm": "aes", + "bit_length": 256, + "expiration": null, + "mode": "cbc", + "name": null, + "payload_content_type": "application/octet-stream" + }, + "order_ref": "http://barbican:9311/v1/orders/07fba88b-3dcf-44e3-a4a3-0bad7f56f01c", + "secret_ref": "http://barbican:9311/v1/secrets/a31ad551-1aa5-4ba0-810e-0865163e0fa9", + "status": "ACTIVE", + "sub_status": "Unknown", + "sub_status_message": "Unknown", + "type": "key", + "updated": "2018-06-22T05:08:15" + } + ], + "total": 2 +}` + +// GetResponse provides a Get result. +const GetResponse = ` +{ + "created": "2018-06-22T05:08:15", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "meta": { + "algorithm": "aes", + "bit_length": 256, + "expiration": null, + "mode": "cbc", + "name": null, + "payload_content_type": "application/octet-stream" + }, + "order_ref": "http://barbican:9311/v1/orders/07fba88b-3dcf-44e3-a4a3-0bad7f56f01c", + "secret_ref": "http://barbican:9311/v1/secrets/a31ad551-1aa5-4ba0-810e-0865163e0fa9", + "status": "ACTIVE", + "sub_status": "Unknown", + "sub_status_message": "Unknown", + "type": "key", + "updated": "2018-06-22T05:08:15" +} +` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "meta": { + "algorithm": "aes", + "bit_length": 256, + "mode": "cbc", + "payload_content_type": "application/octet-stream" + }, + "type": "key" +}` + +// FirstOrder is the first resource in the List request. 
+var FirstOrder = orders.Order{ + Created: time.Date(2018, 6, 22, 5, 5, 43, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Meta: orders.Meta{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + PayloadContentType: "application/octet-stream", + }, + OrderRef: "http://barbican:9311/v1/orders/46f73695-82bb-447a-bf96-6635f0fb0ce7", + SecretRef: "http://barbican:9311/v1/secrets/22dfef44-1046-4549-a86d-95af462e8fa0", + Status: "ACTIVE", + SubStatus: "Unknown", + SubStatusMessage: "Unknown", + Type: "key", + Updated: time.Date(2018, 6, 22, 5, 5, 43, 0, time.UTC), +} + +// SecondOrder is the second resource in the List request. +var SecondOrder = orders.Order{ + Created: time.Date(2018, 6, 22, 5, 8, 15, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Meta: orders.Meta{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + PayloadContentType: "application/octet-stream", + }, + OrderRef: "http://barbican:9311/v1/orders/07fba88b-3dcf-44e3-a4a3-0bad7f56f01c", + SecretRef: "http://barbican:9311/v1/secrets/a31ad551-1aa5-4ba0-810e-0865163e0fa9", + Status: "ACTIVE", + SubStatus: "Unknown", + SubStatusMessage: "Unknown", + Type: "key", + Updated: time.Date(2018, 6, 22, 5, 8, 15, 0, time.UTC), +} + +// ExpectedOrdersSlice is the slice of orders expected to be returned from ListResponse. +var ExpectedOrdersSlice = []orders.Order{FirstOrder, SecondOrder} + +// HandleListOrdersSuccessfully creates an HTTP handler at `/orders` on the +// test handler mux that responds with a list of two orders. +func HandleListOrdersSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/orders", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListResponse) + }) +} + +// HandleGetOrderSuccessfully creates an HTTP handler at `/orders` on the +// test handler mux that responds with a single resource. +func HandleGetOrderSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/orders/46f73695-82bb-447a-bf96-6635f0fb0ce7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetResponse) + }) +} + +// HandleCreateOrderSuccessfully creates an HTTP handler at `/orders` on the +// test handler mux that tests resource creation. +func HandleCreateOrderSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/orders", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusAccepted) + fmt.Fprintf(w, GetResponse) + }) +} + +// HandleDeleteOrderSuccessfully creates an HTTP handler at `/orders` on the +// test handler mux that tests resource deletion. 
+func HandleDeleteOrderSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/orders/46f73695-82bb-447a-bf96-6635f0fb0ce7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/testing/requests_test.go new file mode 100644 index 000000000000..5c9fcb313354 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/testing/requests_test.go @@ -0,0 +1,81 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListOrders(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListOrdersSuccessfully(t) + + count := 0 + err := orders.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := orders.ExtractOrders(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedOrdersSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestListOrdersAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListOrdersSuccessfully(t) + + allPages, err := orders.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := orders.ExtractOrders(allPages) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedOrdersSlice, actual) +} + +func TestGetOrder(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetOrderSuccessfully(t) + + actual, err := orders.Get(client.ServiceClient(), "46f73695-82bb-447a-bf96-6635f0fb0ce7").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, SecondOrder, *actual) +} + +func TestCreateOrder(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateOrderSuccessfully(t) + + createOpts := orders.CreateOpts{ + Type: orders.KeyOrder, + Meta: orders.MetaOpts{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + PayloadContentType: "application/octet-stream", + }, + } + + actual, err := orders.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, SecondOrder, *actual) +} + +func TestDeleteOrder(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteOrderSuccessfully(t) + + res := orders.Delete(client.ServiceClient(), "46f73695-82bb-447a-bf96-6635f0fb0ce7") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/urls.go new file mode 100644 index 000000000000..36890e533ac2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/orders/urls.go @@ -0,0 +1,19 @@ +package orders + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("orders") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("orders", id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("orders") +} + +func deleteURL(client 
*gophercloud.ServiceClient, id string) string {
+	return client.ServiceURL("orders", id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/doc.go
new file mode 100644
index 000000000000..4167e425d21c
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/doc.go
@@ -0,0 +1,142 @@
+/*
+Package secrets manages and retrieves secrets in the OpenStack Key Manager
+Service.
+
+Example to List Secrets
+
+	createdQuery := &secrets.DateQuery{
+		Date:   time.Date(2049, 6, 7, 1, 2, 3, 0, time.UTC),
+		Filter: secrets.DateFilterLT,
+	}
+
+	listOpts := secrets.ListOpts{
+		CreatedQuery: createdQuery,
+	}
+
+	allPages, err := secrets.List(client, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allSecrets, err := secrets.ExtractSecrets(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, v := range allSecrets {
+		fmt.Printf("%v\n", v)
+	}
+
+Example to Get a Secret
+
+	secret, err := secrets.Get(client, secretID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", secret)
+
+Example to Get a Payload
+
+	payload, err := secrets.GetPayload(client, secretID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(string(payload))
+
+Example to Create a Secret
+
+	createOpts := secrets.CreateOpts{
+		Algorithm:          "aes",
+		BitLength:          256,
+		Mode:               "cbc",
+		Name:               "mysecret",
+		Payload:            "super-secret",
+		PayloadContentType: "text/plain",
+		SecretType:         secrets.OpaqueSecret,
+	}
+
+	secret, err := secrets.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Println(secret.SecretRef)
+
+Example to Add a Payload
+
+	updateOpts := secrets.UpdateOpts{
+		ContentType: "text/plain",
+		Payload:     "super-secret",
+	}
+
+	err := secrets.Update(client, secretID, updateOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Secret
+
+	err := secrets.Delete(client, secretID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create Metadata for a Secret
+
+	createOpts := secrets.MetadataOpts{
+		"foo":       "bar",
+		"something": "something else",
+	}
+
+	ref, err := secrets.CreateMetadata(client, secretID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", ref)
+
+Example to Get Metadata for a Secret
+
+	metadata, err := secrets.GetMetadata(client, secretID).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", metadata)
+
+Example to Add Metadata to a Secret
+
+	metadatumOpts := secrets.MetadatumOpts{
+		Key:   "foo",
+		Value: "bar",
+	}
+
+	err := secrets.CreateMetadatum(client, secretID, metadatumOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update Metadata of a Secret
+
+	metadatumOpts := secrets.MetadatumOpts{
+		Key:   "foo",
+		Value: "bar",
+	}
+
+	metadatum, err := secrets.UpdateMetadatum(client, secretID, metadatumOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+	fmt.Printf("%v\n", metadatum)
+
+Example to Delete Metadata of a Secret
+
+	err := secrets.DeleteMetadatum(client, secretID, "foo").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package secrets
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/requests.go
new file mode 100644
index 000000000000..900cbcaaa694
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/requests.go
@@ -0,0 +1,412
@@ +package secrets + +import ( + "fmt" + "net/url" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// DateFilter represents a valid filter to use for filtering +// secrets by their date during a list. +type DateFilter string + +const ( + DateFilterGT DateFilter = "gt" + DateFilterGTE DateFilter = "gte" + DateFilterLT DateFilter = "lt" + DateFilterLTE DateFilter = "lte" +) + +// DateQuery represents a date field to be used for listing secrets. +// If no filter is specified, the query will act as if "equal" is used. +type DateQuery struct { + Date time.Time + Filter DateFilter +} + +// SecretType represents a valid secret type. +type SecretType string + +const ( + SymmetricSecret SecretType = "symmetric" + PublicSecret SecretType = "public" + PrivateSecret SecretType = "private" + PassphraseSecret SecretType = "passphrase" + CertificateSecret SecretType = "certificate" + OpaqueSecret SecretType = "opaque" +) + +// ListOptsBuilder allows extensions to add additional parameters to +// the List request +type ListOptsBuilder interface { + ToSecretListQuery() (string, error) +} + +// ListOpts provides options to filter the List results. +type ListOpts struct { + // Offset is the starting index within the total list of the secrets that + // you would like to retrieve. + Offset int `q:"offset"` + + // Limit is the maximum number of records to return. + Limit int `q:"limit"` + + // Name will select all secrets with a matching name. + Name string `q:"name"` + + // Alg will select all secrets with a matching algorithm. + Alg string `q:"alg"` + + // Mode will select all secrets with a matching mode. + Mode string `q:"mode"` + + // Bits will select all secrets with a matching bit length. + Bits int `q:"bits"` + + // SecretType will select all secrets with a matching secret type. + SecretType SecretType `q:"secret_type"` + + // ACLOnly will select all secrets with an ACL that contains the user. + ACLOnly *bool `q:"acl_only"` + + // CreatedQuery will select all secrets with a created date matching + // the query. + CreatedQuery *DateQuery + + // UpdatedQuery will select all secrets with an updated date matching + // the query. + UpdatedQuery *DateQuery + + // ExpirationQuery will select all secrets with an expiration date + // matching the query. + ExpirationQuery *DateQuery + + // Sort will sort the results in the requested order. + Sort string `q:"sort"` +} + +// ToSecretListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSecretListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + params := q.Query() + + if opts.CreatedQuery != nil { + created := opts.CreatedQuery.Date.Format(time.RFC3339) + if v := opts.CreatedQuery.Filter; v != "" { + created = fmt.Sprintf("%s:%s", v, created) + } + + params.Add("created", created) + } + + if opts.UpdatedQuery != nil { + updated := opts.UpdatedQuery.Date.Format(time.RFC3339) + if v := opts.UpdatedQuery.Filter; v != "" { + updated = fmt.Sprintf("%s:%s", v, updated) + } + + params.Add("updated", updated) + } + + if opts.ExpirationQuery != nil { + expiration := opts.ExpirationQuery.Date.Format(time.RFC3339) + if v := opts.ExpirationQuery.Filter; v != "" { + expiration = fmt.Sprintf("%s:%s", v, expiration) + } + + params.Add("expiration", expiration) + } + + q = &url.URL{RawQuery: params.Encode()} + + return q.String(), err +} + +// List retrieves a list of Secrets. 
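+//
+// A minimal usage sketch (mirroring the package documentation), assuming an
+// authenticated key manager service client named "client"; the filter value
+// is illustrative only:
+//
+//	listOpts := secrets.ListOpts{
+//		SecretType: secrets.OpaqueSecret,
+//	}
+//
+//	allPages, err := secrets.List(client, listOpts).AllPages()
+//	if err != nil {
+//		// handle the error
+//	}
+//
+//	allSecrets, err := secrets.ExtractSecrets(allPages)
+//	if err != nil {
+//		// handle the error
+//	}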
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToSecretListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return SecretPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves details of a secret.
+func Get(client *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, id), &r.Body, nil)
+	return
+}
+
+// GetPayloadOpts represents options used for obtaining a payload.
+type GetPayloadOpts struct {
+	PayloadContentType string `h:"Accept"`
+}
+
+// GetPayloadOptsBuilder allows extensions to add additional parameters to
+// the GetPayload request.
+type GetPayloadOptsBuilder interface {
+	ToSecretPayloadGetParams() (map[string]string, error)
+}
+
+// ToSecretPayloadGetParams formats a GetPayloadOpts into a query string.
+func (opts GetPayloadOpts) ToSecretPayloadGetParams() (map[string]string, error) {
+	return gophercloud.BuildHeaders(opts)
+}
+
+// GetPayload retrieves the payload of a secret.
+func GetPayload(client *gophercloud.ServiceClient, id string, opts GetPayloadOptsBuilder) (r PayloadResult) {
+	h := map[string]string{"Accept": "text/plain"}
+
+	if opts != nil {
+		headers, err := opts.ToSecretPayloadGetParams()
+		if err != nil {
+			r.Err = err
+			return
+		}
+		for k, v := range headers {
+			h[k] = v
+		}
+	}
+
+	url := payloadURL(client, id)
+	resp, err := client.Get(url, nil, &gophercloud.RequestOpts{
+		MoreHeaders: h,
+		OkCodes:     []int{200},
+	})
+
+	if resp != nil {
+		r.Header = resp.Header
+		r.Body = resp.Body
+	}
+	r.Err = err
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to
+// the Create request.
+type CreateOptsBuilder interface {
+	ToSecretCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts provides options used to create a secret.
+type CreateOpts struct {
+	// Algorithm is the algorithm of the secret.
+	Algorithm string `json:"algorithm,omitempty"`
+
+	// BitLength is the bit length of the secret.
+	BitLength int `json:"bit_length,omitempty"`
+
+	// Mode is the mode of encryption for the secret.
+	Mode string `json:"mode,omitempty"`
+
+	// Name is the name of the secret.
+	Name string `json:"name,omitempty"`
+
+	// Payload is the secret.
+	Payload string `json:"payload,omitempty"`
+
+	// PayloadContentType is the content type of the payload.
+	PayloadContentType string `json:"payload_content_type,omitempty"`
+
+	// PayloadContentEncoding is the content encoding of the payload.
+	PayloadContentEncoding string `json:"payload_content_encoding,omitempty"`
+
+	// SecretType is the type of secret.
+	SecretType SecretType `json:"secret_type,omitempty"`
+}
+
+// ToSecretCreateMap formats a CreateOpts into a create request.
+func (opts CreateOpts) ToSecretCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// Create creates a new secret.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSecretCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(createURL(client), &b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201},
+	})
+	return
+}
+
+// Delete deletes a secret.
+func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, id), nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to
+// the Update request.
+type UpdateOptsBuilder interface {
+	ToSecretUpdateRequest() (string, map[string]string, error)
+}
+
+// UpdateOpts represents parameters to add a payload to an existing
+// secret which does not already contain a payload.
+type UpdateOpts struct {
+	// ContentType represents the content type of the payload.
+	ContentType string `h:"Content-Type"`
+
+	// ContentEncoding represents the content encoding of the payload.
+	ContentEncoding string `h:"Content-Encoding"`
+
+	// Payload is the payload of the secret.
+	Payload string
+}
+
+// ToSecretUpdateRequest formats a UpdateOpts into an update request.
+func (opts UpdateOpts) ToSecretUpdateRequest() (string, map[string]string, error) {
+	h, err := gophercloud.BuildHeaders(opts)
+	if err != nil {
+		return "", nil, err
+	}
+
+	return opts.Payload, h, nil
+}
+
+// Update modifies the attributes of a secret.
+func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	url := updateURL(client, id)
+	h := make(map[string]string)
+	var b string
+
+	if opts != nil {
+		payload, headers, err := opts.ToSecretUpdateRequest()
+		if err != nil {
+			r.Err = err
+			return
+		}
+
+		for k, v := range headers {
+			h[k] = v
+		}
+
+		b = payload
+	}
+
+	resp, err := client.Put(url, nil, nil, &gophercloud.RequestOpts{
+		RawBody:     strings.NewReader(b),
+		MoreHeaders: h,
+		OkCodes:     []int{204},
+	})
+	r.Err = err
+	if resp != nil {
+		r.Header = resp.Header
+	}
+
+	return
+}
+
+// GetMetadata will list metadata for a given secret.
+func GetMetadata(client *gophercloud.ServiceClient, secretID string) (r MetadataResult) {
+	_, r.Err = client.Get(metadataURL(client, secretID), &r.Body, nil)
+	return
+}
+
+// MetadataOpts is a map that contains key-value pairs for secret metadata.
+type MetadataOpts map[string]string
+
+// CreateMetadataOptsBuilder allows extensions to add additional parameters to
+// the CreateMetadata request.
+type CreateMetadataOptsBuilder interface {
+	ToMetadataCreateMap() (map[string]interface{}, error)
+}
+
+// ToMetadataCreateMap converts a MetadataOpts into a request body.
+func (opts MetadataOpts) ToMetadataCreateMap() (map[string]interface{}, error) {
+	return map[string]interface{}{"metadata": opts}, nil
+}
+
+// CreateMetadata will set metadata for a given secret.
+func CreateMetadata(client *gophercloud.ServiceClient, secretID string, opts CreateMetadataOptsBuilder) (r MetadataCreateResult) {
+	b, err := opts.ToMetadataCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(metadataURL(client, secretID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201},
+	})
+	return
+}
+
+// GetMetadatum will get a single key/value metadatum from a secret.
+func GetMetadatum(client *gophercloud.ServiceClient, secretID string, key string) (r MetadatumResult) {
+	_, r.Err = client.Get(metadatumURL(client, secretID, key), &r.Body, nil)
+	return
+}
+
+// MetadatumOpts represents a single metadatum.
+type MetadatumOpts struct {
+	Key   string `json:"key" required:"true"`
+	Value string `json:"value" required:"true"`
+}
+
+// CreateMetadatumOptsBuilder allows extensions to add additional parameters to
+// the CreateMetadatum request.
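+//
+// A minimal sketch of the usual CreateMetadatum flow, assuming an
+// authenticated service client named "client" and a hypothetical secret ID
+// in "secretID":
+//
+//	metadatumOpts := secrets.MetadatumOpts{
+//		Key:   "foo",
+//		Value: "bar",
+//	}
+//
+//	err := secrets.CreateMetadatum(client, secretID, metadatumOpts).ExtractErr()
+//	if err != nil {
+//		// handle the error
+//	}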
+type CreateMetadatumOptsBuilder interface {
+	ToMetadatumCreateMap() (map[string]interface{}, error)
+}
+
+// ToMetadatumCreateMap converts a MetadatumOpts into a request body.
+func (opts MetadatumOpts) ToMetadatumCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// CreateMetadatum will add a single key/value metadatum to a secret.
+func CreateMetadatum(client *gophercloud.ServiceClient, secretID string, opts CreateMetadatumOptsBuilder) (r MetadatumCreateResult) {
+	b, err := opts.ToMetadatumCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Post(metadataURL(client, secretID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201},
+	})
+	return
+}
+
+// UpdateMetadatumOptsBuilder allows extensions to add additional parameters to
+// the UpdateMetadatum request.
+type UpdateMetadatumOptsBuilder interface {
+	ToMetadatumUpdateMap() (map[string]interface{}, string, error)
+}
+
+// ToMetadatumUpdateMap converts a MetadatumOpts into a request body.
+func (opts MetadatumOpts) ToMetadatumUpdateMap() (map[string]interface{}, string, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	return b, opts.Key, err
+}
+
+// UpdateMetadatum will update a single key/value metadatum of a secret.
+func UpdateMetadatum(client *gophercloud.ServiceClient, secretID string, opts UpdateMetadatumOptsBuilder) (r MetadatumResult) {
+	b, key, err := opts.ToMetadatumUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = client.Put(metadatumURL(client, secretID, key), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// DeleteMetadatum will delete an individual metadatum from a secret.
+func DeleteMetadatum(client *gophercloud.ServiceClient, secretID string, key string) (r MetadatumDeleteResult) {
+	_, r.Err = client.Delete(metadatumURL(client, secretID, key), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/results.go
new file mode 100644
index 000000000000..a3230a8b4fbd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/results.go
@@ -0,0 +1,227 @@
+package secrets
+
+import (
+	"encoding/json"
+	"io"
+	"io/ioutil"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Secret represents a secret stored in the key manager service.
+type Secret struct {
+	// BitLength is the bit length of the secret.
+	BitLength int `json:"bit_length"`
+
+	// Algorithm is the algorithm type of the secret.
+	Algorithm string `json:"algorithm"`
+
+	// Expiration is the expiration date of the secret.
+	Expiration time.Time `json:"-"`
+
+	// ContentTypes are the content types of the secret.
+	ContentTypes map[string]string `json:"content_types"`
+
+	// Created is the created date of the secret.
+	Created time.Time `json:"-"`
+
+	// CreatorID is the creator of the secret.
+	CreatorID string `json:"creator_id"`
+
+	// Mode is the mode of the secret.
+	Mode string `json:"mode"`
+
+	// Name is the name of the secret.
+	Name string `json:"name"`
+
+	// SecretRef is the URL to the secret.
+	SecretRef string `json:"secret_ref"`
+
+	// SecretType represents the type of secret.
+	SecretType string `json:"secret_type"`
+
+	// Status represents the status of the secret.
+	Status string `json:"status"`
+
+	// Updated is the updated date of the secret.
+	Updated time.Time `json:"-"`
+}
+
+func (r *Secret) UnmarshalJSON(b []byte) error {
+	type tmp Secret
+	var s struct {
+		tmp
+		Created    gophercloud.JSONRFC3339NoZ `json:"created"`
+		Updated    gophercloud.JSONRFC3339NoZ `json:"updated"`
+		Expiration gophercloud.JSONRFC3339NoZ `json:"expiration"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*r = Secret(s.tmp)
+
+	r.Created = time.Time(s.Created)
+	r.Updated = time.Time(s.Updated)
+	r.Expiration = time.Time(s.Expiration)
+
+	return nil
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any commonResult as a Secret.
+func (r commonResult) Extract() (*Secret, error) {
+	var s *Secret
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// GetResult is the response from a Get operation. Call its Extract method
+// to interpret it as a Secret.
+type GetResult struct {
+	commonResult
+}
+
+// CreateResult is the response from a Create operation. Call its Extract method
+// to interpret it as a Secret.
+type CreateResult struct {
+	commonResult
+}
+
+// UpdateResult is the response from an Update operation. Call its ExtractErr to
+// determine if the request succeeded or failed.
+type UpdateResult struct {
+	gophercloud.ErrResult
+}
+
+// DeleteResult is the response from a Delete operation. Call its ExtractErr to
+// determine if the request succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// PayloadResult is the response from a GetPayload operation. Call its Extract
+// method to extract the payload as a string.
+type PayloadResult struct {
+	gophercloud.Result
+	Body io.ReadCloser
+}
+
+// Extract is a function that takes a PayloadResult's io.Reader body
+// and reads all available data into a slice of bytes. Please be aware that
+// the io.Reader is forward-only - meaning that it can only be read once and
+// not rewound. You can recreate a reader from the output of this function by
+// using bytes.NewReader(downloadBytes).
+func (r PayloadResult) Extract() ([]byte, error) {
+	if r.Err != nil {
+		return nil, r.Err
+	}
+	defer r.Body.Close()
+	body, err := ioutil.ReadAll(r.Body)
+	if err != nil {
+		return nil, err
+	}
+	r.Body.Close()
+	return body, nil
+}
+
+// SecretPage is a single page of secrets results.
+type SecretPage struct {
+	pagination.LinkedPageBase
+}
+
+// IsEmpty determines whether or not a page of secrets contains any results.
+func (r SecretPage) IsEmpty() (bool, error) {
+	secrets, err := ExtractSecrets(r)
+	return len(secrets) == 0, err
+}
+
+// NextPageURL extracts the "next" link from the links section of the result.
+func (r SecretPage) NextPageURL() (string, error) {
+	var s struct {
+		Next     string `json:"next"`
+		Previous string `json:"previous"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return s.Next, err
+}
+
+// ExtractSecrets returns a slice of Secrets contained in a single page of
+// results.
+func ExtractSecrets(r pagination.Page) ([]Secret, error) {
+	var s struct {
+		Secrets []Secret `json:"secrets"`
+	}
+	err := (r.(SecretPage)).ExtractInto(&s)
+	return s.Secrets, err
+}
+
+// MetadataResult is the result of a metadata request. Call its Extract method
+// to interpret it as a map[string]string.
+type MetadataResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any MetadataResult as map[string]string.
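+//
+// A minimal usage sketch, assuming an authenticated service client named
+// "client" and a hypothetical secret ID in "secretID":
+//
+//	metadata, err := secrets.GetMetadata(client, secretID).Extract()
+//	if err != nil {
+//		// handle the error
+//	}
+//	fmt.Printf("%v\n", metadata)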
+func (r MetadataResult) Extract() (map[string]string, error) {
+	var s struct {
+		Metadata map[string]string `json:"metadata"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Metadata, err
+}
+
+// MetadataCreateResult is the result of a metadata create request. Call its
+// Extract method to interpret it as a map[string]string.
+type MetadataCreateResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any MetadataCreateResult as a map[string]string.
+func (r MetadataCreateResult) Extract() (map[string]string, error) {
+	var s map[string]string
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// Metadatum represents an individual metadatum.
+type Metadatum struct {
+	Key   string `json:"key"`
+	Value string `json:"value"`
+}
+
+// MetadatumResult is the result of a metadatum request. Call its
+// Extract method to interpret it as a Metadatum.
+type MetadatumResult struct {
+	gophercloud.Result
+}
+
+// Extract interprets any MetadatumResult as a Metadatum.
+func (r MetadatumResult) Extract() (*Metadatum, error) {
+	var s *Metadatum
+	err := r.ExtractInto(&s)
+	return s, err
+}
+
+// MetadatumCreateResult is the response from a metadata Create operation. Call
+// its ExtractErr to determine if the request succeeded or failed.
+//
+// NOTE: This could be a MetadatumResponse but, at the time of testing, it looks
+// like Barbican was returning erroneous JSON in the response.
+type MetadatumCreateResult struct {
+	gophercloud.ErrResult
+}
+
+// MetadatumDeleteResult is the response from a metadatum Delete operation. Call
+// its ExtractErr to determine if the request succeeded or failed.
+type MetadatumDeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/testing/fixtures.go
new file mode 100644
index 000000000000..2b50658b81c6
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/testing/fixtures.go
@@ -0,0 +1,355 @@
+package testing
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+	"time"
+
+	"github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets"
+	th "github.com/gophercloud/gophercloud/testhelper"
+	"github.com/gophercloud/gophercloud/testhelper/client"
+)
+
+// ListResponse provides a single page of secrets results.
+const ListResponse = `
+{
+    "secrets": [
+        {
+            "algorithm": "aes",
+            "bit_length": 256,
+            "content_types": {
+                "default": "text/plain"
+            },
+            "created": "2018-06-21T02:49:48",
+            "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e",
+            "expiration": null,
+            "mode": "cbc",
+            "name": "mysecret",
+            "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c",
+            "secret_type": "opaque",
+            "status": "ACTIVE",
+            "updated": "2018-06-21T02:49:48"
+        },
+        {
+            "algorithm": "aes",
+            "bit_length": 256,
+            "content_types": {
+                "default": "text/plain"
+            },
+            "created": "2018-06-21T05:18:45",
+            "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e",
+            "expiration": null,
+            "mode": "cbc",
+            "name": "anothersecret",
+            "secret_ref": "http://barbican:9311/v1/secrets/1b12b69a-8822-442e-a303-da24ade648ac",
+            "secret_type": "opaque",
+            "status": "ACTIVE",
+            "updated": "2018-06-21T05:18:45"
+        }
+    ],
+    "total": 2
+}`
+
+// GetResponse provides a Get result.
+const GetResponse = ` +{ + "algorithm": "aes", + "bit_length": 256, + "content_types": { + "default": "text/plain" + }, + "created": "2018-06-21T02:49:48", + "creator_id": "5c70d99f4a8641c38f8084b32b5e5c0e", + "expiration": null, + "mode": "cbc", + "name": "mysecret", + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", + "secret_type": "opaque", + "status": "ACTIVE", + "updated": "2018-06-21T02:49:48" +}` + +// GetPayloadResponse provides a payload result. +const GetPayloadResponse = `foobar` + +// CreateRequest provides the input to a Create request. +const CreateRequest = ` +{ + "algorithm": "aes", + "bit_length": 256, + "mode": "cbc", + "name": "mysecret", + "payload": "foobar", + "payload_content_type": "text/plain", + "secret_type": "opaque" +}` + +// CreateResponse provides a Create result. +const CreateResponse = ` +{ + "secret_ref": "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c" +}` + +// UpdateRequest provides the input to as Update request. +const UpdateRequest = `foobar` + +// FirstSecret is the first secret in the List request. +var FirstSecret = secrets.Secret{ + Algorithm: "aes", + BitLength: 256, + ContentTypes: map[string]string{ + "default": "text/plain", + }, + Created: time.Date(2018, 6, 21, 2, 49, 48, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Mode: "cbc", + Name: "mysecret", + SecretRef: "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", + SecretType: "opaque", + Status: "ACTIVE", + Updated: time.Date(2018, 6, 21, 2, 49, 48, 0, time.UTC), +} + +// SecondSecret is the second secret in the List request. +var SecondSecret = secrets.Secret{ + Algorithm: "aes", + BitLength: 256, + ContentTypes: map[string]string{ + "default": "text/plain", + }, + Created: time.Date(2018, 6, 21, 5, 18, 45, 0, time.UTC), + CreatorID: "5c70d99f4a8641c38f8084b32b5e5c0e", + Mode: "cbc", + Name: "anothersecret", + SecretRef: "http://barbican:9311/v1/secrets/1b12b69a-8822-442e-a303-da24ade648ac", + SecretType: "opaque", + Status: "ACTIVE", + Updated: time.Date(2018, 6, 21, 5, 18, 45, 0, time.UTC), +} + +// ExpectedSecretsSlice is the slice of secrets expected to be returned from ListResponse. +var ExpectedSecretsSlice = []secrets.Secret{FirstSecret, SecondSecret} + +// ExpectedCreateResult is the result of a create request +var ExpectedCreateResult = secrets.Secret{ + SecretRef: "http://barbican:9311/v1/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", +} + +const GetMetadataResponse = ` +{ + "metadata": { + "foo": "bar", + "something": "something else" + } +}` + +// ExpectedMetadata is the result of a Get or Create request. +var ExpectedMetadata = map[string]string{ + "foo": "bar", + "something": "something else", +} + +const CreateMetadataRequest = ` +{ + "metadata": { + "foo": "bar", + "something": "something else" + } +}` + +const CreateMetadataResponse = ` +{ + "metadata_ref": "http://barbican:9311/v1/secrets/1b12b69a-8822-442e-a303-da24ade648ac/metadata" +}` + +// ExpectedCreateMetadataResult is the result of a Metadata create request. 
+var ExpectedCreateMetadataResult = map[string]string{ + "metadata_ref": "http://barbican:9311/v1/secrets/1b12b69a-8822-442e-a303-da24ade648ac/metadata", +} + +const MetadatumRequest = ` +{ + "key": "foo", + "value": "bar" +}` + +const MetadatumResponse = ` +{ + "key": "foo", + "value": "bar" +}` + +// ExpectedMetadatum is the result of a Metadatum Get, Create, or Update +// request +var ExpectedMetadatum = secrets.Metadatum{ + Key: "foo", + Value: "bar", +} + +// HandleListSecretsSuccessfully creates an HTTP handler at `/secrets` on the +// test handler mux that responds with a list of two secrets. +func HandleListSecretsSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ListResponse) + }) +} + +// HandleGetSecretSuccessfully creates an HTTP handler at `/secrets` on the +// test handler mux that responds with a single secret. +func HandleGetSecretSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetResponse) + }) +} + +// HandleGetPayloadSuccessfully creates an HTTP handler at `/secrets` on the +// test handler mux that responds with a single secret. +func HandleGetPayloadSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/payload", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetPayloadResponse) + }) +} + +// HandleCreateSecretSuccessfully creates an HTTP handler at `/secrets` on the +// test handler mux that tests secret creation. +func HandleCreateSecretSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, CreateResponse) + }) +} + +// HandleDeleteSecretSuccessfully creates an HTTP handler at `/secrets` on the +// test handler mux that tests secret deletion. +func HandleDeleteSecretSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateSecretSuccessfully creates an HTTP handler at `/secrets` on the +// test handler mux that tests secret updates. 
+func HandleUpdateSecretSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleGetMetadataSuccessfully creates an HTTP handler at +// `/secrets/uuid/metadata` on the test handler mux that responds with +// retrieved metadata. +func HandleGetMetadataSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, GetMetadataResponse) + }) +} + +// HandleCreateMetadataSuccessfully creates an HTTP handler at +// `/secrets/uuid/metadata` on the test handler mux that responds with +// a metadata reference URL. +func HandleCreateMetadataSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, CreateMetadataRequest) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, CreateMetadataResponse) + }) +} + +// HandleGetMetadatumSuccessfully creates an HTTP handler at +// `/secrets/uuid/metadata/foo` on the test handler mux that responds with a +// single metadatum. +func HandleGetMetadatumSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/metadata/foo", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, MetadatumResponse) + }) +} + +// HandleCreateMetadatumSuccessfully creates an HTTP handler at +// `/secrets/uuid/metadata` on the test handler mux that responds with +// a single created metadata. +func HandleCreateMetadatumSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, MetadatumRequest) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, MetadatumResponse) + }) +} + +// HandleUpdateMetadatumSuccessfully creates an HTTP handler at +// `/secrets/uuid/metadata/foo` on the test handler mux that responds with a +// single updated metadatum. 
+func HandleUpdateMetadatumSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/metadata/foo", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, MetadatumRequest) + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, MetadatumResponse) + }) +} + +// HandleDeleteMetadatumSuccessfully creates an HTTP handler at +// `/secrets/uuid/metadata/key` on the test handler mux that tests metadata +// deletion. +func HandleDeleteMetadatumSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/secrets/1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c/metadata/foo", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/testing/requests_test.go new file mode 100644 index 000000000000..d63879c9423a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/testing/requests_test.go @@ -0,0 +1,179 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListSecrets(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSecretsSuccessfully(t) + + count := 0 + err := secrets.List(client.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + + actual, err := secrets.ExtractSecrets(page) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ExpectedSecretsSlice, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.AssertEquals(t, count, 1) +} + +func TestListSecretsAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSecretsSuccessfully(t) + + allPages, err := secrets.List(client.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + actual, err := secrets.ExtractSecrets(allPages) + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedSecretsSlice, actual) +} + +func TestGetSecret(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSecretSuccessfully(t) + + actual, err := secrets.Get(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, FirstSecret, *actual) +} + +func TestCreateSecret(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSecretSuccessfully(t) + + createOpts := secrets.CreateOpts{ + Algorithm: "aes", + BitLength: 256, + Mode: "cbc", + Name: "mysecret", + Payload: "foobar", + PayloadContentType: "text/plain", + SecretType: secrets.OpaqueSecret, + } + + actual, err := secrets.Create(client.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCreateResult, *actual) +} + +func TestDeleteSecret(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSecretSuccessfully(t) + + res := secrets.Delete(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateSecret(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSecretSuccessfully(t) + + updateOpts := secrets.UpdateOpts{ + Payload: "foobar", + } + + err := secrets.Update(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetPayloadSecret(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetPayloadSuccessfully(t) + + res := secrets.GetPayload(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", nil) + th.AssertNoErr(t, res.Err) + payload, err := res.Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, GetPayloadResponse, string(payload)) +} + +func TestGetMetadataSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetMetadataSuccessfully(t) + + actual, err := secrets.GetMetadata(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedMetadata, actual) +} + +func TestCreateMetadataSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateMetadataSuccessfully(t) + + createOpts := secrets.MetadataOpts{ + "foo": "bar", + "something": "something else", + } + + actual, err := secrets.CreateMetadata(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", createOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedCreateMetadataResult, actual) +} + +func TestGetMetadatumSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetMetadatumSuccessfully(t) + + actual, err := secrets.GetMetadatum(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", "foo").Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedMetadatum, *actual) +} + +func TestCreateMetadatumSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateMetadatumSuccessfully(t) + + createOpts := secrets.MetadatumOpts{ + Key: "foo", + Value: "bar", + } + + err := secrets.CreateMetadatum(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", createOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateMetadatumSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateMetadatumSuccessfully(t) + + updateOpts := secrets.MetadatumOpts{ + Key: "foo", + Value: "bar", + } + + actual, err := secrets.UpdateMetadatum(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", updateOpts).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ExpectedMetadatum, *actual) +} + +func TestDeleteMetadatumSuccessfully(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteMetadatumSuccessfully(t) + + err := secrets.DeleteMetadatum(client.ServiceClient(), "1b8068c4-3bb6-4be6-8f1e-da0d1ea0b67c", "foo").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/urls.go new file mode 100644 index 000000000000..ebd636a66e8a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/keymanager/v1/secrets/urls.go @@ -0,0 +1,35 @@ +package secrets + +import "github.com/gophercloud/gophercloud" + +func listURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL("secrets") +} + +func getURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("secrets", id) +} + +func createURL(client *gophercloud.ServiceClient) string { + return 
client.ServiceURL("secrets") +} + +func deleteURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("secrets", id) +} + +func updateURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("secrets", id) +} + +func payloadURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("secrets", id, "payload") +} + +func metadataURL(client *gophercloud.ServiceClient, id string) string { + return client.ServiceURL("secrets", id, "metadata") +} + +func metadatumURL(client *gophercloud.ServiceClient, id, key string) string { + return client.ServiceURL("secrets", id, "metadata", key) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/doc.go new file mode 100644 index 000000000000..61b0b8b6b279 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/doc.go @@ -0,0 +1,25 @@ +/* +Package amphorae provides information and interaction with Amphorae +of OpenStack Load-balancing service. + +Example to List Amphorae + + listOpts := amphorae.ListOpts{ + LoadbalancerID: "6bd55cd3-802e-447e-a518-1e74e23bb106", + } + + allPages, err := amphorae.List(octaviaClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allAmphorae, err := amphorae.ExtractAmphorae(allPages) + if err != nil { + panic(err) + } + + for _, amphora := range allAmphorae { + fmt.Printf("%+v\n", amphora) + } +*/ +package amphorae diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/requests.go new file mode 100644 index 000000000000..557d052fb634 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/requests.go @@ -0,0 +1,59 @@ +package amphorae + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToAmphoraListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Amphorae attributes you want to see returned. SortKey allows you to +// sort by a particular attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + LoadbalancerID string `q:"loadbalancer_id"` + ImageID string `q:"image_id"` + Role string `q:"role"` + Status string `q:"status"` + HAPortID string `q:"ha_port_id"` + VRRPPortID string `q:"vrrp_port_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToAmphoraListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToAmphoraListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// amphorae. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. 
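+//
+// A minimal usage sketch, assuming an authenticated Octavia service client
+// named "octaviaClient":
+//
+//	err := amphorae.List(octaviaClient, amphorae.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+//		amps, err := amphorae.ExtractAmphorae(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, amp := range amps {
+//			fmt.Printf("%+v\n", amp)
+//		}
+//		return true, nil
+//	})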
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := rootURL(c)
+	if opts != nil {
+		query, err := opts.ToAmphoraListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+	return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page {
+		return AmphoraPage{pagination.LinkedPageBase{PageResult: r}}
+	})
+}
+
+// Get retrieves a particular amphora based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/results.go
new file mode 100644
index 000000000000..9560a53df107
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/results.go
@@ -0,0 +1,148 @@
+package amphorae
+
+import (
+	"encoding/json"
+	"time"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Amphora is a virtual machine, container, dedicated hardware, appliance or device that actually performs the task of
+// load balancing in the Octavia system.
+type Amphora struct {
+	// The unique ID for the Amphora.
+	ID string `json:"id"`
+
+	// The ID of the load balancer.
+	LoadbalancerID string `json:"loadbalancer_id"`
+
+	// The management IP of the amphora.
+	LBNetworkIP string `json:"lb_network_ip"`
+
+	// The ID of the amphora resource in the compute system.
+	ComputeID string `json:"compute_id"`
+
+	// The IP address of the Virtual IP (VIP).
+	HAIP string `json:"ha_ip"`
+
+	// The ID of the Virtual IP (VIP) port.
+	HAPortID string `json:"ha_port_id"`
+
+	// The date the certificate for the amphora expires.
+	CertExpiration time.Time `json:"-"`
+
+	// Whether the certificate is in the process of being replaced.
+	CertBusy bool `json:"cert_busy"`
+
+	// The role of the amphora. One of STANDALONE, MASTER, BACKUP.
+	Role string `json:"role"`
+
+	// The status of the amphora. One of: BOOTING, ALLOCATED, READY, PENDING_CREATE, PENDING_DELETE, DELETED, ERROR.
+	Status string `json:"status"`
+
+	// The vrrp port’s ID in the networking system.
+	VRRPPortID string `json:"vrrp_port_id"`
+
+	// The address of the vrrp port on the amphora.
+	VRRPIP string `json:"vrrp_ip"`
+
+	// The bound interface name of the vrrp port on the amphora.
+	VRRPInterface string `json:"vrrp_interface"`
+
+	// The vrrp group’s ID for the amphora.
+	VRRPID int `json:"vrrp_id"`
+
+	// The priority of the amphora in the vrrp group.
+	VRRPPriority int `json:"vrrp_priority"`
+
+	// The availability zone of a compute instance, cached at create time. This is not guaranteed to be current. May be
+	// an empty-string if the compute service does not use zones.
+	CachedZone string `json:"cached_zone"`
+
+	// The ID of the glance image used for the amphora.
+	ImageID string `json:"image_id"`
+
+	// The UTC date and timestamp when the resource was created.
+	CreatedAt time.Time `json:"-"`
+
+	// The UTC date and timestamp when the resource was last updated.
+	UpdatedAt time.Time `json:"-"`
+}
+
+func (a *Amphora) UnmarshalJSON(b []byte) error {
+	type tmp Amphora
+	var s struct {
+		tmp
+		CertExpiration gophercloud.JSONRFC3339NoZ `json:"cert_expiration"`
+		CreatedAt      gophercloud.JSONRFC3339NoZ `json:"created_at"`
+		UpdatedAt      gophercloud.JSONRFC3339NoZ `json:"updated_at"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+	*a = Amphora(s.tmp)
+
+	a.CreatedAt = time.Time(s.CreatedAt)
+	a.UpdatedAt = time.Time(s.UpdatedAt)
+	a.CertExpiration = time.Time(s.CertExpiration)
+
+	return nil
+}
+
+// AmphoraPage is the page returned by a pager when traversing over a
+// collection of amphorae.
+type AmphoraPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of amphorae has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r AmphoraPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"amphorae_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether an AmphoraPage struct is empty.
+func (r AmphoraPage) IsEmpty() (bool, error) {
+	is, err := ExtractAmphorae(r)
+	return len(is) == 0, err
+}
+
+// ExtractAmphorae accepts a Page struct, specifically an AmphoraPage
+// struct, and extracts the elements into a slice of Amphora structs. In
+// other words, a generic collection is mapped into a relevant slice.
+func ExtractAmphorae(r pagination.Page) ([]Amphora, error) {
+	var s struct {
+		Amphorae []Amphora `json:"amphorae"`
+	}
+	err := (r.(AmphoraPage)).ExtractInto(&s)
+	return s.Amphorae, err
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an amphora.
+func (r commonResult) Extract() (*Amphora, error) {
+	var s struct {
+		Amphora *Amphora `json:"amphora"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Amphora, err
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as an amphora.
+type GetResult struct {
+	commonResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/doc.go
new file mode 100644
index 000000000000..7603f836a002
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/doc.go
@@ -0,0 +1 @@
+package testing
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/fixtures.go
new file mode 100644
index 000000000000..c5ed496c7b81
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/fixtures.go
@@ -0,0 +1,170 @@
+package testing
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"time"
+
+	"github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae"
+	th "github.com/gophercloud/gophercloud/testhelper"
+	"github.com/gophercloud/gophercloud/testhelper/client"
+)
+
+// AmphoraeListBody contains the canned body of an amphora list response.
+const AmphoraeListBody = ` +{ + "amphorae": [ + { + "cached_zone": "nova", + "cert_busy": false, + "cert_expiration": "2020-08-08T23:44:31", + "compute_id": "667bb225-69aa-44b1-8908-694dc624c267", + "created_at": "2018-08-09T23:44:31", + "ha_ip": "10.0.0.6", + "ha_port_id": "35254b63-9361-4561-9b8f-2bb4e3be60e3", + "id": "45f40289-0551-483a-b089-47214bc2a8a4", + "image_id": "5d1aed06-2624-43f5-a413-9212263c3d53", + "lb_network_ip": "192.168.0.6", + "loadbalancer_id": "882f2a9d-9d53-4bd0-b0e9-08e9d0de11f9", + "role": "MASTER", + "status": "READY", + "updated_at": "2018-08-09T23:51:06", + "vrrp_id": 1, + "vrrp_interface": "eth1", + "vrrp_ip": "10.0.0.4", + "vrrp_port_id": "dcf0c8b5-6a08-4658-997d-eac97f2b9bbd", + "vrrp_priority": 100 + }, + { + "cached_zone": "nova", + "cert_busy": false, + "cert_expiration": "2020-08-08T23:44:30", + "compute_id": "9cd0f9a2-fe12-42fc-a7e3-5b6fbbe20395", + "created_at": "2018-08-09T23:44:31", + "ha_ip": "10.0.0.6", + "ha_port_id": "35254b63-9361-4561-9b8f-2bb4e3be60e3", + "id": "7f890893-ced0-46ed-8697-33415d070e5a", + "image_id": "5d1aed06-2624-43f5-a413-9212263c3d53", + "lb_network_ip": "192.168.0.17", + "loadbalancer_id": "882f2a9d-9d53-4bd0-b0e9-08e9d0de11f9", + "role": "BACKUP", + "status": "READY", + "updated_at": "2018-08-09T23:51:06", + "vrrp_id": 1, + "vrrp_interface": "eth1", + "vrrp_ip": "10.0.0.21", + "vrrp_port_id": "13c88c77-207d-4f85-8f7a-84344592e367", + "vrrp_priority": 90 + } + ], + "amphorae_links": [] +} +` + +const SingleAmphoraBody = ` +{ + "amphora": { + "cached_zone": "nova", + "cert_busy": false, + "cert_expiration": "2020-08-08T23:44:31", + "compute_id": "667bb225-69aa-44b1-8908-694dc624c267", + "created_at": "2018-08-09T23:44:31", + "ha_ip": "10.0.0.6", + "ha_port_id": "35254b63-9361-4561-9b8f-2bb4e3be60e3", + "id": "45f40289-0551-483a-b089-47214bc2a8a4", + "image_id": "5d1aed06-2624-43f5-a413-9212263c3d53", + "lb_network_ip": "192.168.0.6", + "loadbalancer_id": "882f2a9d-9d53-4bd0-b0e9-08e9d0de11f9", + "role": "MASTER", + "status": "READY", + "updated_at": "2018-08-09T23:51:06", + "vrrp_id": 1, + "vrrp_interface": "eth1", + "vrrp_ip": "10.0.0.4", + "vrrp_port_id": "dcf0c8b5-6a08-4658-997d-eac97f2b9bbd", + "vrrp_priority": 100 + } +} +` + +// FirstAmphora is the first resource in the List request. +var FirstAmphora = amphorae.Amphora{ + CachedZone: "nova", + CertBusy: false, + CertExpiration: time.Date(2020, 8, 8, 23, 44, 31, 0, time.UTC), + ComputeID: "667bb225-69aa-44b1-8908-694dc624c267", + CreatedAt: time.Date(2018, 8, 9, 23, 44, 31, 0, time.UTC), + HAIP: "10.0.0.6", + HAPortID: "35254b63-9361-4561-9b8f-2bb4e3be60e3", + ID: "45f40289-0551-483a-b089-47214bc2a8a4", + ImageID: "5d1aed06-2624-43f5-a413-9212263c3d53", + LBNetworkIP: "192.168.0.6", + LoadbalancerID: "882f2a9d-9d53-4bd0-b0e9-08e9d0de11f9", + Role: "MASTER", + Status: "READY", + UpdatedAt: time.Date(2018, 8, 9, 23, 51, 6, 0, time.UTC), + VRRPID: 1, + VRRPInterface: "eth1", + VRRPIP: "10.0.0.4", + VRRPPortID: "dcf0c8b5-6a08-4658-997d-eac97f2b9bbd", + VRRPPriority: 100, +} + +// SecondAmphora is the second resource in the List request. 
+var SecondAmphora = amphorae.Amphora{ + CachedZone: "nova", + CertBusy: false, + CertExpiration: time.Date(2020, 8, 8, 23, 44, 30, 0, time.UTC), + ComputeID: "9cd0f9a2-fe12-42fc-a7e3-5b6fbbe20395", + CreatedAt: time.Date(2018, 8, 9, 23, 44, 31, 0, time.UTC), + HAIP: "10.0.0.6", + HAPortID: "35254b63-9361-4561-9b8f-2bb4e3be60e3", + ID: "7f890893-ced0-46ed-8697-33415d070e5a", + ImageID: "5d1aed06-2624-43f5-a413-9212263c3d53", + LBNetworkIP: "192.168.0.17", + LoadbalancerID: "882f2a9d-9d53-4bd0-b0e9-08e9d0de11f9", + Role: "BACKUP", + Status: "READY", + UpdatedAt: time.Date(2018, 8, 9, 23, 51, 6, 0, time.UTC), + VRRPID: 1, + VRRPInterface: "eth1", + VRRPIP: "10.0.0.21", + VRRPPortID: "13c88c77-207d-4f85-8f7a-84344592e367", + VRRPPriority: 90, +} + +// ExpectedAmphoraeSlice is the slice of amphorae expected to be returned from ListResponse. +var ExpectedAmphoraeSlice = []amphorae.Amphora{FirstAmphora, SecondAmphora} + +// HandleAmphoraListSuccessfully sets up the test server to respond to a amphorae List request. +func HandleAmphoraListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/octavia/amphorae", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, AmphoraeListBody) + case "7f890893-ced0-46ed-8697-33415d070e5a": + fmt.Fprintf(w, `{ "amphorae": [] }`) + default: + t.Fatalf("/v2.0/octavia/amphorae invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleAmphoraGetSuccessfully sets up the test server to respond to am amphora Get request. +func HandleAmphoraGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/octavia/amphorae/45f40289-0551-483a-b089-47214bc2a8a4", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleAmphoraBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/requests_test.go new file mode 100644 index 000000000000..bc500425868e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/testing/requests_test.go @@ -0,0 +1,65 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae" + fake "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListAmphorae(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAmphoraListSuccessfully(t) + + pages := 0 + err := amphorae.List(fake.ServiceClient(), amphorae.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := amphorae.ExtractAmphorae(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 amphorae, got %d", len(actual)) + } + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllAmphorae(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAmphoraListSuccessfully(t) + + allPages, err := amphorae.List(fake.ServiceClient(), 
amphorae.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := amphorae.ExtractAmphorae(allPages) + th.AssertNoErr(t, err) + th.AssertEquals(t, 2, len(actual)) + th.AssertDeepEquals(t, ExpectedAmphoraeSlice, actual) +} + +func TestGetAmphora(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAmphoraGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := amphorae.Get(client, "45f40289-0551-483a-b089-47214bc2a8a4").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, FirstAmphora, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/urls.go new file mode 100644 index 000000000000..2f9fb7e8fb77 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/amphorae/urls.go @@ -0,0 +1,16 @@ +package amphorae + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "octavia" + resourcePath = "amphorae" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/doc.go new file mode 100644 index 000000000000..ec7f9d6f049a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/doc.go @@ -0,0 +1,3 @@ +// Package lbaas_v2 provides information and interaction with the Load Balancer +// as a Service v2 extension for the OpenStack Networking service. +package lbaas_v2 diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/doc.go new file mode 100644 index 000000000000..813579905c25 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/doc.go @@ -0,0 +1,123 @@ +/* +Package l7policies provides information and interaction with L7Policies and +Rules of the LBaaS v2 extension for the OpenStack Networking service. 
+
+Example to Create an L7Policy
+
+	createOpts := l7policies.CreateOpts{
+		Name:        "redirect-example.com",
+		ListenerID:  "023f2e34-7806-443b-bfae-16c324569a3d",
+		Action:      l7policies.ActionRedirectToURL,
+		RedirectURL: "http://www.example.com",
+	}
+	l7policy, err := l7policies.Create(lbClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List L7Policies
+
+	listOpts := l7policies.ListOpts{
+		ListenerID: "c79a4468-d788-410c-bf79-9a8ef6354852",
+	}
+	allPages, err := l7policies.List(lbClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+	allL7Policies, err := l7policies.ExtractL7Policies(allPages)
+	if err != nil {
+		panic(err)
+	}
+	for _, l7policy := range allL7Policies {
+		fmt.Printf("%+v\n", l7policy)
+	}
+
+Example to Get an L7Policy
+
+	l7policy, err := l7policies.Get(lbClient, "023f2e34-7806-443b-bfae-16c324569a3d").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an L7Policy
+
+	l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := l7policies.Delete(lbClient, l7policyID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an L7Policy
+
+	l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	name := "new-name"
+	updateOpts := l7policies.UpdateOpts{
+		Name: &name,
+	}
+	l7policy, err := l7policies.Update(lbClient, l7policyID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Create a Rule
+
+	l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	createOpts := l7policies.CreateRuleOpts{
+		RuleType:    l7policies.TypePath,
+		CompareType: l7policies.CompareTypeRegex,
+		Value:       "/images*",
+	}
+	rule, err := l7policies.CreateRule(lbClient, l7policyID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List L7 Rules
+
+	l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	listOpts := l7policies.ListRulesOpts{
+		RuleType: l7policies.TypePath,
+	}
+	allPages, err := l7policies.ListRules(lbClient, l7policyID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+	allRules, err := l7policies.ExtractRules(allPages)
+	if err != nil {
+		panic(err)
+	}
+	for _, rule := range allRules {
+		fmt.Printf("%+v\n", rule)
+	}
+
+Example to Get an L7 Rule
+
+	l7rule, err := l7policies.GetRule(lbClient, "023f2e34-7806-443b-bfae-16c324569a3d", "53ad8ab8-40fa-11e8-a508-00224d6b7bc1").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete an L7 Rule
+
+	l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	ruleID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+	err := l7policies.DeleteRule(lbClient, l7policyID, ruleID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Rule
+
+	l7policyID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	ruleID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+	updateOpts := l7policies.UpdateRuleOpts{
+		RuleType:    l7policies.TypePath,
+		CompareType: l7policies.CompareTypeRegex,
+		Value:       "/images/special*",
+	}
+	rule, err := l7policies.UpdateRule(lbClient, l7policyID, ruleID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package l7policies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/requests.go
new file mode 100644
index 000000000000..c8887ae3b668
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/requests.go
@@ -0,0 +1,328 @@
+package l7policies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// 
CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToL7PolicyCreateMap() (map[string]interface{}, error) +} + +type Action string +type RuleType string +type CompareType string + +const ( + ActionRedirectToPool Action = "REDIRECT_TO_POOL" + ActionRedirectToURL Action = "REDIRECT_TO_URL" + ActionReject Action = "REJECT" + + TypeCookie RuleType = "COOKIE" + TypeFileType RuleType = "FILE_TYPE" + TypeHeader RuleType = "HEADER" + TypeHostName RuleType = "HOST_NAME" + TypePath RuleType = "PATH" + + CompareTypeContains CompareType = "CONTAINS" + CompareTypeEndWith CompareType = "ENDS_WITH" + CompareTypeEqual CompareType = "EQUAL_TO" + CompareTypeRegex CompareType = "REGEX" + CompareTypeStartWith CompareType = "STARTS_WITH" +) + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Name of the L7 policy. + Name string `json:"name,omitempty"` + + // The ID of the listener. + ListenerID string `json:"listener_id" required:"true"` + + // The L7 policy action. One of REDIRECT_TO_POOL, REDIRECT_TO_URL, or REJECT. + Action Action `json:"action" required:"true"` + + // The position of this policy on the listener. + Position int32 `json:"position,omitempty"` + + // A human-readable description for the resource. + Description string `json:"description,omitempty"` + + // ProjectID is the UUID of the project who owns the L7 policy in octavia. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // Requests matching this policy will be redirected to the pool with this ID. + // Only valid if action is REDIRECT_TO_POOL. + RedirectPoolID string `json:"redirect_pool_id,omitempty"` + + // Requests matching this policy will be redirected to this URL. + // Only valid if action is REDIRECT_TO_URL. + RedirectURL string `json:"redirect_url,omitempty"` +} + +// ToL7PolicyCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToL7PolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "l7policy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new l7policy. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToL7PolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToL7PolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. +type ListOpts struct { + Name string `q:"name"` + ListenerID string `q:"listener_id"` + Action string `q:"action"` + ProjectID string `q:"project_id"` + RedirectPoolID string `q:"redirect_pool_id"` + RedirectURL string `q:"redirect_url"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToL7PolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToL7PolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// l7policies. 
It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those l7policies that are owned by the +// project who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToL7PolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return L7PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a particular l7policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular l7policy based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToL7PolicyUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Name of the L7 policy, empty string is allowed. + Name *string `json:"name,omitempty"` + + // The L7 policy action. One of REDIRECT_TO_POOL, REDIRECT_TO_URL, or REJECT. + Action Action `json:"action,omitempty"` + + // The position of this policy on the listener. + Position int32 `json:"position,omitempty"` + + // A human-readable description for the resource, empty string is allowed. + Description *string `json:"description,omitempty"` + + // Requests matching this policy will be redirected to the pool with this ID. + // Only valid if action is REDIRECT_TO_POOL. + RedirectPoolID string `json:"redirect_pool_id,omitempty"` + + // Requests matching this policy will be redirected to this URL. + // Only valid if action is REDIRECT_TO_URL. + RedirectURL string `json:"redirect_url,omitempty"` +} + +// ToL7PolicyUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToL7PolicyUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "l7policy") +} + +// Update allows l7policy to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToL7PolicyUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// CreateRuleOpts is the common options struct used in this package's CreateRule +// operation. +type CreateRuleOpts struct { + // The L7 rule type. One of COOKIE, FILE_TYPE, HEADER, HOST_NAME, or PATH. + RuleType RuleType `json:"type" required:"true"` + + // The comparison type for the L7 rule. One of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, or STARTS_WITH. + CompareType CompareType `json:"compare_type" required:"true"` + + // The value to use for the comparison. For example, the file type to compare. + Value string `json:"value" required:"true"` + + // ProjectID is the UUID of the project who owns the rule in octavia. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // The key to use for the comparison. 
For example, the name of the cookie to evaluate. + Key string `json:"key,omitempty"` + + // When true the logic of the rule is inverted. For example, with invert true, + // equal to would become not equal to. Default is false. + Invert bool `json:"invert,omitempty"` +} + +// ToRuleCreateMap builds a request body from CreateRuleOpts. +func (opts CreateRuleOpts) ToRuleCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rule") +} + +// CreateRule will create and associate a Rule with a particular L7Policy. +func CreateRule(c *gophercloud.ServiceClient, policyID string, opts CreateRuleOpts) (r CreateRuleResult) { + b, err := opts.ToRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(ruleRootURL(c, policyID), b, &r.Body, nil) + return +} + +// ListRulesOptsBuilder allows extensions to add additional parameters to the +// ListRules request. +type ListRulesOptsBuilder interface { + ToRulesListQuery() (string, error) +} + +// ListRulesOpts allows the filtering and sorting of paginated collections +// through the API. +type ListRulesOpts struct { + RuleType RuleType `q:"type"` + ProjectID string `q:"project_id"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToRulesListQuery formats a ListOpts into a query string. +func (opts ListRulesOpts) ToRulesListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListRules returns a Pager which allows you to iterate over a collection of +// rules. It accepts a ListRulesOptsBuilder, which allows you to filter and +// sort the returned collection for greater efficiency. +// +// Default policy settings return only those rules that are owned by the +// project who submits the request, unless an admin user submits the request. +func ListRules(c *gophercloud.ServiceClient, policyID string, opts ListRulesOptsBuilder) pagination.Pager { + url := ruleRootURL(c, policyID) + if opts != nil { + query, err := opts.ToRulesListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return RulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// GetRule retrieves a particular L7Policy Rule based on its unique ID. +func GetRule(c *gophercloud.ServiceClient, policyID string, ruleID string) (r GetRuleResult) { + _, r.Err = c.Get(ruleResourceURL(c, policyID, ruleID), &r.Body, nil) + return +} + +// DeleteRule will remove a Rule from a particular L7Policy. +func DeleteRule(c *gophercloud.ServiceClient, policyID string, ruleID string) (r DeleteRuleResult) { + _, r.Err = c.Delete(ruleResourceURL(c, policyID, ruleID), nil) + return +} + +// UpdateRuleOptsBuilder allows to add additional parameters to the PUT request. +type UpdateRuleOptsBuilder interface { + ToRuleUpdateMap() (map[string]interface{}, error) +} + +// UpdateRuleOpts is the common options struct used in this package's Update +// operation. +type UpdateRuleOpts struct { + // The L7 rule type. One of COOKIE, FILE_TYPE, HEADER, HOST_NAME, or PATH. + RuleType RuleType `json:"type,omitempty"` + + // The comparison type for the L7 rule. One of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, or STARTS_WITH. + CompareType CompareType `json:"compare_type,omitempty"` + + // The value to use for the comparison. For example, the file type to compare. 
+ Value string `json:"value,omitempty"` + + // The key to use for the comparison. For example, the name of the cookie to evaluate. + Key string `json:"key,omitempty"` + + // When true the logic of the rule is inverted. For example, with invert true, + // equal to would become not equal to. Default is false. + Invert bool `json:"invert,omitempty"` +} + +// ToRuleUpdateMap builds a request body from UpdateRuleOpts. +func (opts UpdateRuleOpts) ToRuleUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rule") +} + +// UpdateRule allows Rule to be updated. +func UpdateRule(c *gophercloud.ServiceClient, policyID string, ruleID string, opts UpdateRuleOptsBuilder) (r UpdateRuleResult) { + b, err := opts.ToRuleUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(ruleResourceURL(c, policyID, ruleID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/results.go new file mode 100644 index 000000000000..43c8b74d8af9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/results.go @@ -0,0 +1,223 @@ +package l7policies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// L7Policy is a collection of L7 rules associated with a Listener, and which +// may also have an association to a back-end pool. +type L7Policy struct { + // The unique ID for the L7 policy. + ID string `json:"id"` + + // Name of the L7 policy. + Name string `json:"name"` + + // The ID of the listener. + ListenerID string `json:"listener_id"` + + // The L7 policy action. One of REDIRECT_TO_POOL, REDIRECT_TO_URL, or REJECT. + Action string `json:"action"` + + // The position of this policy on the listener. + Position int32 `json:"position"` + + // A human-readable description for the resource. + Description string `json:"description"` + + // ProjectID is the UUID of the project who owns the L7 policy in octavia. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id"` + + // Requests matching this policy will be redirected to the pool with this ID. + // Only valid if action is REDIRECT_TO_POOL. + RedirectPoolID string `json:"redirect_pool_id"` + + // Requests matching this policy will be redirected to this URL. + // Only valid if action is REDIRECT_TO_URL. + RedirectURL string `json:"redirect_url"` + + // The administrative state of the L7 policy, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Rules are List of associated L7 rule IDs. + Rules []Rule `json:"rules"` +} + +// Rule represents layer 7 load balancing rule. +type Rule struct { + // The unique ID for the L7 rule. + ID string `json:"id"` + + // The L7 rule type. One of COOKIE, FILE_TYPE, HEADER, HOST_NAME, or PATH. + RuleType string `json:"type"` + + // The comparison type for the L7 rule. One of CONTAINS, ENDS_WITH, EQUAL_TO, REGEX, or STARTS_WITH. + CompareType string `json:"compare_type"` + + // The value to use for the comparison. For example, the file type to compare. + Value string `json:"value"` + + // ProjectID is the UUID of the project who owns the rule in octavia. + // Only administrative users can specify a project UUID other than their own. 
+ ProjectID string `json:"project_id"` + + // The key to use for the comparison. For example, the name of the cookie to evaluate. + Key string `json:"key"` + + // When true the logic of the rule is inverted. For example, with invert true, + // equal to would become not equal to. Default is false. + Invert bool `json:"invert"` + + // The administrative state of the L7 rule, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a l7policy. +func (r commonResult) Extract() (*L7Policy, error) { + var s struct { + L7Policy *L7Policy `json:"l7policy"` + } + err := r.ExtractInto(&s) + return s.L7Policy, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret the result as a L7Policy. +type CreateResult struct { + commonResult +} + +// L7PolicyPage is the page returned by a pager when traversing over a +// collection of l7policies. +type L7PolicyPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of l7policies has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r L7PolicyPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"l7policies_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a L7PolicyPage struct is empty. +func (r L7PolicyPage) IsEmpty() (bool, error) { + is, err := ExtractL7Policies(r) + return len(is) == 0, err +} + +// ExtractL7Policies accepts a Page struct, specifically a L7PolicyPage struct, +// and extracts the elements into a slice of L7Policy structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractL7Policies(r pagination.Page) ([]L7Policy, error) { + var s struct { + L7Policies []L7Policy `json:"l7policies"` + } + err := (r.(L7PolicyPage)).ExtractInto(&s) + return s.L7Policies, err +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret the result as a L7Policy. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret the result as a L7Policy. +type UpdateResult struct { + commonResult +} + +type commonRuleResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a rule. +func (r commonRuleResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// CreateRuleResult represents the result of a CreateRule operation. +// Call its Extract method to interpret it as a Rule. +type CreateRuleResult struct { + commonRuleResult +} + +// RulePage is the page returned by a pager when traversing over a +// collection of Rules in a L7Policy. +type RulePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of rules has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. 
+func (r RulePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"rules_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a RulePage struct is empty. +func (r RulePage) IsEmpty() (bool, error) { + is, err := ExtractRules(r) + return len(is) == 0, err +} + +// ExtractRules accepts a Page struct, specifically a RulePage struct, +// and extracts the elements into a slice of Rules structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRules(r pagination.Page) ([]Rule, error) { + var s struct { + Rules []Rule `json:"rules"` + } + err := (r.(RulePage)).ExtractInto(&s) + return s.Rules, err +} + +// GetRuleResult represents the result of a GetRule operation. +// Call its Extract method to interpret it as a Rule. +type GetRuleResult struct { + commonRuleResult +} + +// DeleteRuleResult represents the result of a DeleteRule operation. +// Call its ExtractErr method to determine if the request succeeded or failed. +type DeleteRuleResult struct { + gophercloud.ErrResult +} + +// UpdateRuleResult represents the result of an UpdateRule operation. +// Call its Extract method to interpret it as a Rule. +type UpdateRuleResult struct { + commonRuleResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/doc.go new file mode 100644 index 000000000000..f8068dfb6bb9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/doc.go @@ -0,0 +1,2 @@ +// l7policies unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/fixtures.go new file mode 100644 index 000000000000..e14cf898d1c4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/fixtures.go @@ -0,0 +1,375 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// SingleL7PolicyBody is the canned body of a Get request on an existing l7policy. 
+const SingleL7PolicyBody = ` +{ + "l7policy": { + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "description": "", + "admin_state_up": true, + "redirect_pool_id": null, + "redirect_url": "http://www.example.com", + "action": "REDIRECT_TO_URL", + "position": 1, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "name": "redirect-example.com", + "rules": [] + } +} +` + +var ( + L7PolicyToURL = l7policies.L7Policy{ + ID: "8a1412f0-4c32-4257-8b07-af4770b604fd", + Name: "redirect-example.com", + ListenerID: "023f2e34-7806-443b-bfae-16c324569a3d", + Action: "REDIRECT_TO_URL", + Position: 1, + Description: "", + ProjectID: "e3cd678b11784734bc366148aa37580e", + RedirectPoolID: "", + RedirectURL: "http://www.example.com", + AdminStateUp: true, + Rules: []l7policies.Rule{}, + } + L7PolicyToPool = l7policies.L7Policy{ + ID: "964f4ba4-f6cd-405c-bebd-639460af7231", + Name: "redirect-pool", + ListenerID: "be3138a3-5cf7-4513-a4c2-bb137e668bab", + Action: "REDIRECT_TO_POOL", + Position: 1, + Description: "", + ProjectID: "c1f7910086964990847dc6c8b128f63c", + RedirectPoolID: "bac433c6-5bea-4311-80da-bd1cd90fbd25", + RedirectURL: "", + AdminStateUp: true, + Rules: []l7policies.Rule{}, + } + L7PolicyUpdated = l7policies.L7Policy{ + ID: "8a1412f0-4c32-4257-8b07-af4770b604fd", + Name: "NewL7PolicyName", + ListenerID: "023f2e34-7806-443b-bfae-16c324569a3d", + Action: "REDIRECT_TO_URL", + Position: 1, + Description: "Redirect requests to example.com", + ProjectID: "e3cd678b11784734bc366148aa37580e", + RedirectPoolID: "", + RedirectURL: "http://www.new-example.com", + AdminStateUp: true, + Rules: []l7policies.Rule{}, + } + RulePath = l7policies.Rule{ + ID: "16621dbb-a736-4888-a57a-3ecd53df784c", + RuleType: "PATH", + CompareType: "REGEX", + Value: "/images*", + ProjectID: "e3cd678b11784734bc366148aa37580e", + Key: "", + Invert: false, + AdminStateUp: true, + } + RuleHostName = l7policies.Rule{ + ID: "d24521a0-df84-4468-861a-a531af116d1e", + RuleType: "HOST_NAME", + CompareType: "EQUAL_TO", + Value: "www.example.com", + ProjectID: "e3cd678b11784734bc366148aa37580e", + Key: "", + Invert: false, + AdminStateUp: true, + } + RuleUpdated = l7policies.Rule{ + ID: "16621dbb-a736-4888-a57a-3ecd53df784c", + RuleType: "PATH", + CompareType: "REGEX", + Value: "/images/special*", + ProjectID: "e3cd678b11784734bc366148aa37580e", + Key: "", + Invert: false, + AdminStateUp: true, + } +) + +// HandleL7PolicyCreationSuccessfully sets up the test server to respond to a l7policy creation request +// with a given response. +func HandleL7PolicyCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "l7policy": { + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "redirect_url": "http://www.example.com", + "name": "redirect-example.com", + "action": "REDIRECT_TO_URL" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// L7PoliciesListBody contains the canned body of a l7policy list response. 
+const L7PoliciesListBody = ` +{ + "l7policies": [ + { + "redirect_pool_id": null, + "description": "", + "admin_state_up": true, + "rules": [], + "project_id": "e3cd678b11784734bc366148aa37580e", + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "redirect_url": "http://www.example.com", + "action": "REDIRECT_TO_URL", + "position": 1, + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "name": "redirect-example.com" + }, + { + "redirect_pool_id": "bac433c6-5bea-4311-80da-bd1cd90fbd25", + "description": "", + "admin_state_up": true, + "rules": [], + "project_id": "c1f7910086964990847dc6c8b128f63c", + "listener_id": "be3138a3-5cf7-4513-a4c2-bb137e668bab", + "action": "REDIRECT_TO_POOL", + "position": 1, + "id": "964f4ba4-f6cd-405c-bebd-639460af7231", + "name": "redirect-pool" + } + ] +} +` + +// PostUpdateL7PolicyBody is the canned response body of a Update request on an existing l7policy. +const PostUpdateL7PolicyBody = ` +{ + "l7policy": { + "listener_id": "023f2e34-7806-443b-bfae-16c324569a3d", + "description": "Redirect requests to example.com", + "admin_state_up": true, + "redirect_pool_id": null, + "redirect_url": "http://www.new-example.com", + "action": "REDIRECT_TO_URL", + "position": 1, + "project_id": "e3cd678b11784734bc366148aa37580e", + "id": "8a1412f0-4c32-4257-8b07-af4770b604fd", + "name": "NewL7PolicyName", + "rules": [] + } +} +` + +// HandleL7PolicyListSuccessfully sets up the test server to respond to a l7policy List request. +func HandleL7PolicyListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, L7PoliciesListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "l7policies": [] }`) + default: + t.Fatalf("/v2.0/lbaas/l7policies invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleL7PolicyGetSuccessfully sets up the test server to respond to a l7policy Get request. +func HandleL7PolicyGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleL7PolicyBody) + }) +} + +// HandleL7PolicyDeletionSuccessfully sets up the test server to respond to a l7policy deletion request. +func HandleL7PolicyDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleL7PolicyUpdateSuccessfully sets up the test server to respond to a l7policy Update request. 
+func HandleL7PolicyUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "l7policy": { + "name": "NewL7PolicyName", + "action": "REDIRECT_TO_URL", + "redirect_url": "http://www.new-example.com" + } + }`) + + fmt.Fprintf(w, PostUpdateL7PolicyBody) + }) +} + +// SingleRuleBody is the canned body of a Get request on an existing rule. +const SingleRuleBody = ` +{ + "rule": { + "compare_type": "REGEX", + "invert": false, + "admin_state_up": true, + "value": "/images*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c" + } +} +` + +// HandleRuleCreationSuccessfully sets up the test server to respond to a rule creation request +// with a given response. +func HandleRuleCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "rule": { + "compare_type": "REGEX", + "type": "PATH", + "value": "/images*" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// RulesListBody contains the canned body of a rule list response. +const RulesListBody = ` +{ + "rules":[ + { + "compare_type": "REGEX", + "invert": false, + "admin_state_up": true, + "value": "/images*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c" + }, + { + "compare_type": "EQUAL_TO", + "invert": false, + "admin_state_up": true, + "value": "www.example.com", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "HOST_NAME", + "id": "d24521a0-df84-4468-861a-a531af116d1e" + } + ] +} +` + +// HandleRuleListSuccessfully sets up the test server to respond to a rule List request. +func HandleRuleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, RulesListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "rules": [] }`) + default: + t.Fatalf("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleRuleGetSuccessfully sets up the test server to respond to a rule Get request. 
+func HandleRuleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleRuleBody) + }) +} + +// HandleRuleDeletionSuccessfully sets up the test server to respond to a rule deletion request. +func HandleRuleDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// PostUpdateRuleBody is the canned response body of a Update request on an existing rule. +const PostUpdateRuleBody = ` +{ + "rule": { + "compare_type": "REGEX", + "invert": false, + "admin_state_up": true, + "value": "/images/special*", + "key": null, + "project_id": "e3cd678b11784734bc366148aa37580e", + "type": "PATH", + "id": "16621dbb-a736-4888-a57a-3ecd53df784c" + } +} +` + +// HandleRuleUpdateSuccessfully sets up the test server to respond to a rule Update request. +func HandleRuleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/l7policies/8a1412f0-4c32-4257-8b07-af4770b604fd/rules/16621dbb-a736-4888-a57a-3ecd53df784c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "rule": { + "compare_type": "REGEX", + "type": "PATH", + "value": "/images/special*" + } + }`) + + fmt.Fprintf(w, PostUpdateRuleBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/requests_test.go new file mode 100644 index 000000000000..dc8475450551 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/testing/requests_test.go @@ -0,0 +1,292 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreateL7Policy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleL7PolicyCreationSuccessfully(t, SingleL7PolicyBody) + + actual, err := l7policies.Create(fake.ServiceClient(), l7policies.CreateOpts{ + Name: "redirect-example.com", + ListenerID: "023f2e34-7806-443b-bfae-16c324569a3d", + Action: l7policies.ActionRedirectToURL, + RedirectURL: "http://www.example.com", + }).Extract() + + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, L7PolicyToURL, *actual) +} + +func TestRequiredL7PolicyCreateOpts(t *testing.T) { + // no param specified. + res := l7policies.Create(fake.ServiceClient(), l7policies.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + // Action is invalid. 
+ res = l7policies.Create(fake.ServiceClient(), l7policies.CreateOpts{ + ListenerID: "023f2e34-7806-443b-bfae-16c324569a3d", + Action: l7policies.Action("invalid"), + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } +} + +func TestListL7Policies(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleL7PolicyListSuccessfully(t) + + pages := 0 + err := l7policies.List(fake.ServiceClient(), l7policies.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := l7policies.ExtractL7Policies(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 l7policies, got %d", len(actual)) + } + th.CheckDeepEquals(t, L7PolicyToURL, actual[0]) + th.CheckDeepEquals(t, L7PolicyToPool, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllL7Policies(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleL7PolicyListSuccessfully(t) + + allPages, err := l7policies.List(fake.ServiceClient(), l7policies.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := l7policies.ExtractL7Policies(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, L7PolicyToURL, actual[0]) + th.CheckDeepEquals(t, L7PolicyToPool, actual[1]) +} + +func TestGetL7Policy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleL7PolicyGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := l7policies.Get(client, "8a1412f0-4c32-4257-8b07-af4770b604fd").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, L7PolicyToURL, *actual) +} + +func TestDeleteL7Policy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleL7PolicyDeletionSuccessfully(t) + + res := l7policies.Delete(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateL7Policy(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleL7PolicyUpdateSuccessfully(t) + + client := fake.ServiceClient() + newName := "NewL7PolicyName" + actual, err := l7policies.Update(client, "8a1412f0-4c32-4257-8b07-af4770b604fd", + l7policies.UpdateOpts{ + Name: &newName, + Action: l7policies.ActionRedirectToURL, + RedirectURL: "http://www.new-example.com", + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, L7PolicyUpdated, *actual) +} + +func TestUpdateL7PolicyWithInvalidOpts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + res := l7policies.Update(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.UpdateOpts{ + Action: l7policies.Action("invalid"), + }) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestCreateRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRuleCreationSuccessfully(t, SingleRuleBody) + + actual, err := l7policies.CreateRule(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.CreateRuleOpts{ + RuleType: l7policies.TypePath, + CompareType: l7policies.CompareTypeRegex, + Value: "/images*", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, RulePath, *actual) +} + +func TestRequiredRuleCreateOpts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + res := l7policies.CreateRule(fake.ServiceClient(), "", l7policies.CreateRuleOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = 
l7policies.CreateRule(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.CreateRuleOpts{ + RuleType: l7policies.TypePath, + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + res = l7policies.CreateRule(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.CreateRuleOpts{ + RuleType: l7policies.RuleType("invalid"), + CompareType: l7policies.CompareTypeRegex, + Value: "/images*", + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + res = l7policies.CreateRule(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.CreateRuleOpts{ + RuleType: l7policies.TypePath, + CompareType: l7policies.CompareType("invalid"), + Value: "/images*", + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } +} + +func TestListRules(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRuleListSuccessfully(t) + + pages := 0 + err := l7policies.ListRules(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.ListRulesOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := l7policies.ExtractRules(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 rules, got %d", len(actual)) + } + th.CheckDeepEquals(t, RulePath, actual[0]) + th.CheckDeepEquals(t, RuleHostName, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllRules(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRuleListSuccessfully(t) + + allPages, err := l7policies.ListRules(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", l7policies.ListRulesOpts{}).AllPages() + th.AssertNoErr(t, err) + + actual, err := l7policies.ExtractRules(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, RulePath, actual[0]) + th.CheckDeepEquals(t, RuleHostName, actual[1]) +} + +func TestGetRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRuleGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := l7policies.GetRule(client, "8a1412f0-4c32-4257-8b07-af4770b604fd", "16621dbb-a736-4888-a57a-3ecd53df784c").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, RulePath, *actual) +} + +func TestDeleteRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRuleDeletionSuccessfully(t) + + res := l7policies.DeleteRule(fake.ServiceClient(), "8a1412f0-4c32-4257-8b07-af4770b604fd", "16621dbb-a736-4888-a57a-3ecd53df784c") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateRule(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleRuleUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := l7policies.UpdateRule(client, "8a1412f0-4c32-4257-8b07-af4770b604fd", "16621dbb-a736-4888-a57a-3ecd53df784c", l7policies.UpdateRuleOpts{ + RuleType: l7policies.TypePath, + CompareType: l7policies.CompareTypeRegex, + Value: "/images/special*", + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, RuleUpdated, *actual) +} + +func TestUpdateRuleWithInvalidOpts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + res := l7policies.UpdateRule(fake.ServiceClient(), "", "", l7policies.UpdateRuleOpts{ + RuleType: l7policies.RuleType("invalid"), + }) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = 
l7policies.UpdateRule(fake.ServiceClient(), "", "", l7policies.UpdateRuleOpts{ + CompareType: l7policies.CompareType("invalid"), + }) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/urls.go new file mode 100644 index 000000000000..ecb607a8e89d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/l7policies/urls.go @@ -0,0 +1,25 @@ +package l7policies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "l7policies" + rulePath = "rules" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func ruleRootURL(c *gophercloud.ServiceClient, policyID string) string { + return c.ServiceURL(rootPath, resourcePath, policyID, rulePath) +} + +func ruleResourceURL(c *gophercloud.ServiceClient, policyID string, ruleID string) string { + return c.ServiceURL(rootPath, resourcePath, policyID, rulePath, ruleID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/doc.go new file mode 100644 index 000000000000..4ccb967755fe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/doc.go @@ -0,0 +1,71 @@ +/* +Package listeners provides information and interaction with Listeners of the +LBaaS v2 extension for the OpenStack Networking service. + +Example to List Listeners + + listOpts := listeners.ListOpts{ + LoadbalancerID : "ca430f80-1737-4712-8dc6-3f640d55594b", + } + + allPages, err := listeners.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allListeners, err := listeners.ExtractListeners(allPages) + if err != nil { + panic(err) + } + + for _, listener := range allListeners { + fmt.Printf("%+v\n", listener) + } + +Example to Create a Listener + + createOpts := listeners.CreateOpts{ + Protocol: "TCP", + Name: "db", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + AdminStateUp: gophercloud.Enabled, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ProtocolPort: 3306, + } + + listener, err := listeners.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + + i1001 := 1001 + updateOpts := listeners.UpdateOpts{ + ConnLimit: &i1001, + } + + listener, err := listeners.Update(networkClient, listenerID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := listeners.Delete(networkClient, listenerID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Get the Statistics of a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + stats, err := listeners.GetStats(networkClient, listenerID).Extract() + if err != nil { + panic(err) + } +*/ +package listeners diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go new file mode 100644 index 000000000000..ac587e258dd6 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/requests.go @@ -0,0 +1,200 @@ +package listeners + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Type Protocol represents a listener protocol. +type Protocol string + +// Supported attributes for create/update operations. +const ( + ProtocolTCP Protocol = "TCP" + ProtocolHTTP Protocol = "HTTP" + ProtocolHTTPS Protocol = "HTTPS" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToListenerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular listener attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + ProjectID string `q:"project_id"` + LoadbalancerID string `q:"loadbalancer_id"` + DefaultPoolID string `q:"default_pool_id"` + Protocol string `q:"protocol"` + ProtocolPort int `q:"protocol_port"` + ConnectionLimit int `q:"connection_limit"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToListenerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToListenerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// listeners. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those listeners that are owned by the +// project who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToListenerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ListenerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToListenerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options for creating a listener. +type CreateOpts struct { + // The load balancer on which to provision this listener. + LoadbalancerID string `json:"loadbalancer_id" required:"true"` + + // The protocol - can either be TCP, HTTP or HTTPS. + Protocol Protocol `json:"protocol" required:"true"` + + // The port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // ProjectID is only required if the caller has an admin role and wants + // to create a pool for another project. + ProjectID string `json:"project_id,omitempty"` + + // Human-readable name for the Listener. Does not have to be unique. + Name string `json:"name,omitempty"` + + // The ID of the default pool with which the Listener is associated. 
+ DefaultPoolID string `json:"default_pool_id,omitempty"` + + // Human-readable description for the Listener. + Description string `json:"description,omitempty"` + + // The maximum number of connections allowed for the Listener. + ConnLimit *int `json:"connection_limit,omitempty"` + + // A reference to a Barbican container of TLS secrets. + DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"` + + // A list of references to TLS secrets. + SniContainerRefs []string `json:"sni_container_refs,omitempty"` + + // The administrative state of the Listener. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToListenerCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToListenerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "listener") +} + +// Create is an operation which provisions a new Listener based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. +// +// Users with an admin role can create Listeners on behalf of other projects by +// specifying a ProjectID attribute different than their own. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToListenerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular Listener based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToListenerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options for updating a Listener. +type UpdateOpts struct { + // Human-readable name for the Listener. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Listener. + Description string `json:"description,omitempty"` + + // The maximum number of connections allowed for the Listener. + ConnLimit *int `json:"connection_limit,omitempty"` + + // A reference to a Barbican container of TLS secrets. + DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"` + + // A list of references to TLS secrets. + SniContainerRefs []string `json:"sni_container_refs,omitempty"` + + // The administrative state of the Listener. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToListenerUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToListenerUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "listener") +} + +// Update is an operation which modifies the attributes of the specified +// Listener. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { + b, err := opts.ToListenerUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular Listener based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// GetStats returns the current statistics of a particular Listener. +func GetStats(c *gophercloud.ServiceClient, id string) (r StatsResult) { + _, r.Err = c.Get(statisticsRootURL(c, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go new file mode 100644 index 000000000000..c3e712915741 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/results.go @@ -0,0 +1,168 @@ +package listeners + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" + "github.com/gophercloud/gophercloud/pagination" +) + +type LoadBalancerID struct { + ID string `json:"id"` +} + +// Listener is the primary load balancing configuration object that specifies +// the loadbalancer and port on which client traffic is received, as well +// as other details such as the load balancing method to be used, protocol, etc. +type Listener struct { + // The unique ID for the Listener. + ID string `json:"id"` + + // Owner of the Listener. + ProjectID string `json:"project_id"` + + // Human-readable name for the Listener. Does not have to be unique. + Name string `json:"name"` + + // Human-readable description for the Listener. + Description string `json:"description"` + + // The protocol to loadbalance. A valid value is TCP, HTTP, or HTTPS. + Protocol string `json:"protocol"` + + // The port on which to listen to client traffic that is associated with the + // Loadbalancer. A valid value is from 0 to 65535. + ProtocolPort int `json:"protocol_port"` + + // The UUID of default pool. Must have compatible protocol with listener. + DefaultPoolID string `json:"default_pool_id"` + + // A list of load balancer IDs. + Loadbalancers []LoadBalancerID `json:"loadbalancers"` + + // The maximum number of connections allowed for the Loadbalancer. + // Default is -1, meaning no limit. + ConnLimit int `json:"connection_limit"` + + // The list of references to TLS secrets. + SniContainerRefs []string `json:"sni_container_refs"` + + // A reference to a Barbican container of TLS secrets. + DefaultTlsContainerRef string `json:"default_tls_container_ref"` + + // The administrative state of the Listener. A valid value is true (UP) or false (DOWN). + AdminStateUp bool `json:"admin_state_up"` + + // Pools are the pools which are part of this listener. + Pools []pools.Pool `json:"pools"` + + // The provisioning status of the Listener. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +type Stats struct { + // The currently active connections. + ActiveConnections int `json:"active_connections"` + + // The total bytes received. + BytesIn int `json:"bytes_in"` + + // The total bytes sent. + BytesOut int `json:"bytes_out"` + + // The total requests that were unable to be fulfilled. + RequestErrors int `json:"request_errors"` + + // The total connections handled. + TotalConnections int `json:"total_connections"` +} + +// ListenerPage is the page returned by a pager when traversing over a +// collection of listeners.
+type ListenerPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of listeners has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r ListenerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"listeners_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a ListenerPage struct is empty. +func (r ListenerPage) IsEmpty() (bool, error) { + is, err := ExtractListeners(r) + return len(is) == 0, err +} + +// ExtractListeners accepts a Page struct, specifically a ListenerPage struct, +// and extracts the elements into a slice of Listener structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractListeners(r pagination.Page) ([]Listener, error) { + var s struct { + Listeners []Listener `json:"listeners"` + } + err := (r.(ListenerPage)).ExtractInto(&s) + return s.Listeners, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a listener. +func (r commonResult) Extract() (*Listener, error) { + var s struct { + Listener *Listener `json:"listener"` + } + err := r.ExtractInto(&s) + return s.Listener, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Listener. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Listener. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Listener. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// StatsResult represents the result of a GetStats operation. +// Call its Extract method to interpret it as a Stats. +type StatsResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts the status of +// a Listener. 
+func (r StatsResult) Extract() (*Stats, error) { + var s struct { + Stats *Stats `json:"stats"` + } + err := r.ExtractInto(&s) + return s.Stats, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/doc.go new file mode 100644 index 000000000000..f41387e827c5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/doc.go @@ -0,0 +1,2 @@ +// listeners unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/fixtures.go new file mode 100644 index 000000000000..966bcc455f72 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/fixtures.go @@ -0,0 +1,244 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListenersListBody contains the canned body of a listeners list response. +const ListenersListBody = ` +{ + "listeners":[ + { + "id": "db902c0c-d5ff-4753-b465-668ad9656918", + "project_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "web", + "description": "listener config for the web tier", + "loadbalancers": [{"id": "53306cda-815d-4354-9444-59e09da9c3c5"}], + "protocol": "HTTP", + "protocol_port": 80, + "default_pool_id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + }, + { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "project_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "db", + "description": "listener config for the db tier", + "loadbalancers": [{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "protocol": "TCP", + "protocol_port": 3306, + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "connection_limit": 2000, + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + } + ] +} +` + +// SingleListenerBody is the canned body of a Get request on an existing listener. +const SingleListenerBody = ` +{ + "listener": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "project_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "db", + "description": "listener config for the db tier", + "loadbalancers": [{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "protocol": "TCP", + "protocol_port": 3306, + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "connection_limit": 2000, + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + } +} +` + +// PostUpdateListenerBody is the canned response body of an Update request on an existing listener.
+const PostUpdateListenerBody = ` +{ + "listener": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "project_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "NewListenerName", + "description": "listener config for the db tier", + "loadbalancers": [{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "protocol": "TCP", + "protocol_port": 3306, + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "connection_limit": 1000, + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + } +} +` + +// GetListenerStatsBody is the canned request body of a Get request on listener's statistics. +const GetListenerStatsBody = ` +{ + "stats": { + "active_connections": 0, + "bytes_in": 9532, + "bytes_out": 22033, + "request_errors": 46, + "total_connections": 112 + } +} +` + +var ( + ListenerWeb = listeners.Listener{ + ID: "db902c0c-d5ff-4753-b465-668ad9656918", + ProjectID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "web", + Description: "listener config for the web tier", + Loadbalancers: []listeners.LoadBalancerID{{ID: "53306cda-815d-4354-9444-59e09da9c3c5"}}, + Protocol: "HTTP", + ProtocolPort: 80, + DefaultPoolID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + AdminStateUp: true, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + SniContainerRefs: []string{"3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"}, + } + ListenerDb = listeners.Listener{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + ProjectID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "db", + Description: "listener config for the db tier", + Loadbalancers: []listeners.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Protocol: "TCP", + ProtocolPort: 3306, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ConnLimit: 2000, + AdminStateUp: true, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + SniContainerRefs: []string{"3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"}, + } + ListenerUpdated = listeners.Listener{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + ProjectID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "NewListenerName", + Description: "listener config for the db tier", + Loadbalancers: []listeners.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Protocol: "TCP", + ProtocolPort: 3306, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ConnLimit: 1000, + AdminStateUp: true, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + SniContainerRefs: []string{"3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"}, + } + ListenerStatsTree = listeners.Stats{ + ActiveConnections: 0, + BytesIn: 9532, + BytesOut: 22033, + RequestErrors: 46, + TotalConnections: 112, + } +) + +// HandleListenerListSuccessfully sets up the test server to respond to a listener List request. 
+func HandleListenerListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ListenersListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "listeners": [] }`) + default: + t.Fatalf("/v2.0/lbaas/listeners invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleListenerCreationSuccessfully sets up the test server to respond to a listener creation request +// with a given response. +func HandleListenerCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "listener": { + "loadbalancer_id": "79e05663-7f03-45d2-a092-8b94062f22ab", + "protocol": "TCP", + "name": "db", + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "protocol_port": 3306 + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleListenerGetSuccessfully sets up the test server to respond to a listener Get request. +func HandleListenerGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleListenerBody) + }) +} + +// HandleListenerDeletionSuccessfully sets up the test server to respond to a listener deletion request. +func HandleListenerDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleListenerUpdateSuccessfully sets up the test server to respond to a listener Update request. +func HandleListenerUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "listener": { + "name": "NewListenerName", + "connection_limit": 1001 + } + }`) + + fmt.Fprintf(w, PostUpdateListenerBody) + }) +} + +// HandleListenerGetStatsTree sets up the test server to respond to a listener Get stats tree request. 
+func HandleListenerGetStatsTree(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304/stats", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, GetListenerStatsBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/requests_test.go new file mode 100644 index 000000000000..f403fb8e9fb0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/testing/requests_test.go @@ -0,0 +1,151 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + fake "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListListeners(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerListSuccessfully(t) + + pages := 0 + err := listeners.List(fake.ServiceClient(), listeners.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := listeners.ExtractListeners(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 listeners, got %d", len(actual)) + } + th.CheckDeepEquals(t, ListenerWeb, actual[0]) + th.CheckDeepEquals(t, ListenerDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllListeners(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerListSuccessfully(t) + + allPages, err := listeners.List(fake.ServiceClient(), listeners.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := listeners.ExtractListeners(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ListenerWeb, actual[0]) + th.CheckDeepEquals(t, ListenerDb, actual[1]) +} + +func TestCreateListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerCreationSuccessfully(t, SingleListenerBody) + + actual, err := listeners.Create(fake.ServiceClient(), listeners.CreateOpts{ + Protocol: "TCP", + Name: "db", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + AdminStateUp: gophercloud.Enabled, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ProtocolPort: 3306, + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListenerDb, *actual) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := listeners.Create(fake.ServiceClient(), listeners.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo", ProjectID: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo", ProjectID: "bar", Protocol: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: 
"foo", ProjectID: "bar", Protocol: "bar", ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGetListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := listeners.Get(client, "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, ListenerDb, *actual) +} + +func TestDeleteListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerDeletionSuccessfully(t) + + res := listeners.Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerUpdateSuccessfully(t) + + client := fake.ServiceClient() + i1001 := 1001 + actual, err := listeners.Update(client, "4ec89087-d057-4e2c-911f-60a3b47ee304", listeners.UpdateOpts{ + Name: "NewListenerName", + ConnLimit: &i1001, + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, ListenerUpdated, *actual) +} + +func TestGetListenerStatsTree(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerGetStatsTree(t) + + client := fake.ServiceClient() + actual, err := listeners.GetStats(client, "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, ListenerStatsTree, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/urls.go new file mode 100644 index 000000000000..e9e3bccd3e72 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners/urls.go @@ -0,0 +1,21 @@ +package listeners + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "listeners" + statisticsPath = "stats" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func statisticsRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, statisticsPath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/doc.go new file mode 100644 index 000000000000..77cb8321b02b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/doc.go @@ -0,0 +1,93 @@ +/* +Package loadbalancers provides information and interaction with Load Balancers +of the LBaaS v2 extension for the OpenStack Networking service. 
+ +Example to List Load Balancers + + listOpts := loadbalancers.ListOpts{ + Provider: "haproxy", + } + + allPages, err := loadbalancers.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages) + if err != nil { + panic(err) + } + + for _, lb := range allLoadbalancers { + fmt.Printf("%+v\n", lb) + } + +Example to Create a Load Balancer + + createOpts := loadbalancers.CreateOpts{ + Name: "db_lb", + AdminStateUp: gophercloud.Enabled, + VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + VipAddress: "10.30.176.48", + Flavor: "medium", + Provider: "haproxy", + } + + lb, err := loadbalancers.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Load Balancer + + lbID := "d67d56a6-4a86-4688-a282-f46444705c64" + + updateOpts := loadbalancers.UpdateOpts{ + Name: "new-name", + } + + lb, err := loadbalancers.Update(networkClient, lbID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Load Balancer + + deleteOpts := loadbalancers.DeleteOpts{ + Cascade: true, + } + + lbID := "d67d56a6-4a86-4688-a282-f46444705c64" + + err := loadbalancers.Delete(networkClient, lbID, deleteOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to Get the Status of a Load Balancer + + lbID := "d67d56a6-4a86-4688-a282-f46444705c64" + status, err := loadbalancers.GetStatuses(networkClient, lbID).Extract() + if err != nil { + panic(err) + } + +Example to Get the Statistics of a Load Balancer + + lbID := "d67d56a6-4a86-4688-a282-f46444705c64" + stats, err := loadbalancers.GetStats(networkClient, lbID).Extract() + if err != nil { + panic(err) + } + +Example to Failover a Load Balancer + + lbID := "d67d56a6-4a86-4688-a282-f46444705c64" + + err := loadbalancers.Failover(networkClient, lbID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package loadbalancers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go new file mode 100644 index 000000000000..a50d8ef192fd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/requests.go @@ -0,0 +1,219 @@ +package loadbalancers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToLoadBalancerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Loadbalancer attributes you want to see returned. SortKey allows you to +// sort by a particular attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination.
+type ListOpts struct { + Description string `q:"description"` + AdminStateUp *bool `q:"admin_state_up"` + ProjectID string `q:"project_id"` + ProvisioningStatus string `q:"provisioning_status"` + VipAddress string `q:"vip_address"` + VipPortID string `q:"vip_port_id"` + VipSubnetID string `q:"vip_subnet_id"` + ID string `q:"id"` + OperatingStatus string `q:"operating_status"` + Name string `q:"name"` + Flavor string `q:"flavor"` + Provider string `q:"provider"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToLoadBalancerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToLoadBalancerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// load balancers. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those load balancers that are owned by +// the project who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToLoadBalancerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return LoadBalancerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLoadBalancerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Human-readable name for the Loadbalancer. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Loadbalancer. + Description string `json:"description,omitempty"` + + // The network on which to allocate the Loadbalancer's address. A project can + // only create Loadbalancers on networks authorized by policy (e.g. networks + // that belong to them or networks that are shared). + VipSubnetID string `json:"vip_subnet_id" required:"true"` + + // ProjectID is the UUID of the project who owns the Loadbalancer. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // The IP address of the Loadbalancer. + VipAddress string `json:"vip_address,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` + + // The UUID of a flavor. + Flavor string `json:"flavor,omitempty"` + + // The name of the provider. + Provider string `json:"provider,omitempty"` +} + +// ToLoadBalancerCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToLoadBalancerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "loadbalancer") +} + +// Create is an operation which provisions a new loadbalancer based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. 
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLoadBalancerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular Loadbalancer based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLoadBalancerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Human-readable name for the Loadbalancer. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Loadbalancer. + Description string `json:"description,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLoadBalancerUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "loadbalancer") +} + +// Update is an operation which modifies the attributes of the specified +// LoadBalancer. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { + b, err := opts.ToLoadBalancerUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToLoadBalancerDeleteQuery() (string, error) +} + +// DeleteOpts is the common options struct used in this package's Delete +// operation. +type DeleteOpts struct { + // Cascade will delete all children of the load balancer (listeners, monitors, etc). + Cascade bool `q:"cascade"` +} + +// ToLoadBalancerDeleteQuery formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToLoadBalancerDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete will permanently delete a particular LoadBalancer based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string, opts DeleteOptsBuilder) (r DeleteResult) { + url := resourceURL(c, id) + if opts != nil { + query, err := opts.ToLoadBalancerDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + _, r.Err = c.Delete(url, nil) + return +} + +// GetStatuses will return the status of a particular LoadBalancer. +func GetStatuses(c *gophercloud.ServiceClient, id string) (r GetStatusesResult) { + _, r.Err = c.Get(statusRootURL(c, id), &r.Body, nil) + return +} + +// GetStats returns the current statistics of a particular LoadBalancer. +func GetStats(c *gophercloud.ServiceClient, id string) (r StatsResult) { + _, r.Err = c.Get(statisticsRootURL(c, id), &r.Body, nil) + return +} + +// Failover performs a failover of a load balancer.
+func Failover(c *gophercloud.ServiceClient, id string) (r FailoverResult) { + _, r.Err = c.Put(failoverRootURL(c, id), nil, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/results.go new file mode 100644 index 000000000000..cb7fa60eef20 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/results.go @@ -0,0 +1,188 @@ +package loadbalancers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/pagination" +) + +// LoadBalancer is the primary load balancing configuration object that +// specifies the virtual IP address on which client traffic is received, as well +// as other details such as the load balancing method to be used, protocol, etc. +type LoadBalancer struct { + // Human-readable description for the Loadbalancer. + Description string `json:"description"` + + // The administrative state of the Loadbalancer. + // A valid value is true (UP) or false (DOWN). + AdminStateUp bool `json:"admin_state_up"` + + // Owner of the LoadBalancer. + ProjectID string `json:"project_id"` + + // The provisioning status of the LoadBalancer. + // This value is ACTIVE, PENDING_CREATE or ERROR. + ProvisioningStatus string `json:"provisioning_status"` + + // The IP address of the Loadbalancer. + VipAddress string `json:"vip_address"` + + // The UUID of the port associated with the IP address. + VipPortID string `json:"vip_port_id"` + + // The UUID of the subnet on which to allocate the virtual IP for the + // Loadbalancer address. + VipSubnetID string `json:"vip_subnet_id"` + + // The unique ID for the LoadBalancer. + ID string `json:"id"` + + // The operating status of the LoadBalancer. This value is ONLINE or OFFLINE. + OperatingStatus string `json:"operating_status"` + + // Human-readable name for the LoadBalancer. Does not have to be unique. + Name string `json:"name"` + + // The UUID of a flavor if set. + Flavor string `json:"flavor"` + + // The name of the provider. + Provider string `json:"provider"` + + // Listeners are the listeners related to this Loadbalancer. + Listeners []listeners.Listener `json:"listeners"` +} + +// StatusTree represents the status of a loadbalancer. +type StatusTree struct { + Loadbalancer *LoadBalancer `json:"loadbalancer"` +} + +type Stats struct { + // The currently active connections. + ActiveConnections int `json:"active_connections"` + + // The total bytes received. + BytesIn int `json:"bytes_in"` + + // The total bytes sent. + BytesOut int `json:"bytes_out"` + + // The total requests that were unable to be fulfilled. + RequestErrors int `json:"request_errors"` + + // The total connections handled. + TotalConnections int `json:"total_connections"` +} + +// LoadBalancerPage is the page returned by a pager when traversing over a +// collection of load balancers. +type LoadBalancerPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of load balancers has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL.
+func (r LoadBalancerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"loadbalancers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a LoadBalancerPage struct is empty. +func (r LoadBalancerPage) IsEmpty() (bool, error) { + is, err := ExtractLoadBalancers(r) + return len(is) == 0, err +} + +// ExtractLoadBalancers accepts a Page struct, specifically a LoadbalancerPage +// struct, and extracts the elements into a slice of LoadBalancer structs. In +// other words, a generic collection is mapped into a relevant slice. +func ExtractLoadBalancers(r pagination.Page) ([]LoadBalancer, error) { + var s struct { + LoadBalancers []LoadBalancer `json:"loadbalancers"` + } + err := (r.(LoadBalancerPage)).ExtractInto(&s) + return s.LoadBalancers, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a loadbalancer. +func (r commonResult) Extract() (*LoadBalancer, error) { + var s struct { + LoadBalancer *LoadBalancer `json:"loadbalancer"` + } + err := r.ExtractInto(&s) + return s.LoadBalancer, err +} + +// GetStatusesResult represents the result of a GetStatuses operation. +// Call its Extract method to interpret it as a StatusTree. +type GetStatusesResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts the status of +// a Loadbalancer. +func (r GetStatusesResult) Extract() (*StatusTree, error) { + var s struct { + Statuses *StatusTree `json:"statuses"` + } + err := r.ExtractInto(&s) + return s.Statuses, err +} + +// StatsResult represents the result of a GetStats operation. +// Call its Extract method to interpret it as a Stats. +type StatsResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts the status of +// a Loadbalancer. +func (r StatsResult) Extract() (*Stats, error) { + var s struct { + Stats *Stats `json:"stats"` + } + err := r.ExtractInto(&s) + return s.Stats, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a LoadBalancer. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a LoadBalancer. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a LoadBalancer. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// FailoverResult represents the result of a failover operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type FailoverResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/doc.go new file mode 100644 index 000000000000..b54468c82f15 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/doc.go @@ -0,0 +1,2 @@ +// loadbalancers unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/fixtures.go new file mode 100644 index 000000000000..8730d8c28637 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/fixtures.go @@ -0,0 +1,335 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/listeners" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" +) + +// LoadbalancersListBody contains the canned body of a loadbalancer list response. +const LoadbalancersListBody = ` +{ + "loadbalancers":[ + { + "id": "c331058c-6a40-4144-948e-b9fb1df9db4b", + "project_id": "54030507-44f7-473c-9342-b4d14a95f692", + "name": "web_lb", + "description": "lb config for the web tier", + "vip_subnet_id": "8a49c438-848f-467b-9655-ea1548708154", + "vip_address": "10.30.176.47", + "vip_port_id": "2a22e552-a347-44fd-b530-1f2b1b2a6735", + "flavor": "small", + "provider": "haproxy", + "admin_state_up": true, + "provisioning_status": "ACTIVE", + "operating_status": "ONLINE" + }, + { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "project_id": "54030507-44f7-473c-9342-b4d14a95f692", + "name": "db_lb", + "description": "lb config for the db tier", + "vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "vip_address": "10.30.176.48", + "vip_port_id": "2bf413c8-41a9-4477-b505-333d5cbe8b55", + "flavor": "medium", + "provider": "haproxy", + "admin_state_up": true, + "provisioning_status": "PENDING_CREATE", + "operating_status": "OFFLINE" + } + ] +} +` + +// SingleLoadbalancerBody is the canned body of a Get request on an existing loadbalancer. +const SingleLoadbalancerBody = ` +{ + "loadbalancer": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "project_id": "54030507-44f7-473c-9342-b4d14a95f692", + "name": "db_lb", + "description": "lb config for the db tier", + "vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "vip_address": "10.30.176.48", + "vip_port_id": "2bf413c8-41a9-4477-b505-333d5cbe8b55", + "flavor": "medium", + "provider": "haproxy", + "admin_state_up": true, + "provisioning_status": "PENDING_CREATE", + "operating_status": "OFFLINE" + } +} +` + +// PostUpdateLoadbalancerBody is the canned response body of a Update request on an existing loadbalancer. 
+const PostUpdateLoadbalancerBody = ` +{ + "loadbalancer": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "project_id": "54030507-44f7-473c-9342-b4d14a95f692", + "name": "NewLoadbalancerName", + "description": "lb config for the db tier", + "vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "vip_address": "10.30.176.48", + "vip_port_id": "2bf413c8-41a9-4477-b505-333d5cbe8b55", + "flavor": "medium", + "provider": "haproxy", + "admin_state_up": true, + "provisioning_status": "PENDING_CREATE", + "operating_status": "OFFLINE" + } +} +` + +// GetLoadbalancerStatusesBody is the canned request body of a Get request on loadbalancer's status. +const GetLoadbalancerStatusesBody = ` +{ + "statuses" : { + "loadbalancer": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "name": "db_lb", + "provisioning_status": "PENDING_UPDATE", + "operating_status": "ACTIVE", + "listeners": [{ + "id": "db902c0c-d5ff-4753-b465-668ad9656918", + "name": "db", + "provisioning_status": "ACTIVE", + "pools": [{ + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "name": "db", + "provisioning_status": "ACTIVE", + "healthmonitor": { + "id": "67306cda-815d-4354-9fe4-59e09da9c3c5", + "type":"PING", + "provisioning_status": "ACTIVE" + }, + "members":[{ + "id": "2a280670-c202-4b0b-a562-34077415aabf", + "name": "db", + "address": "10.0.2.11", + "protocol_port": 80, + "provisioning_status": "ACTIVE" + }] + }] + }] + } + } +} +` + +// LoadbalancerStatsTree is the canned request body of a Get request on loadbalancer's statistics. +const GetLoadbalancerStatsBody = ` +{ + "stats": { + "active_connections": 0, + "bytes_in": 9532, + "bytes_out": 22033, + "request_errors": 46, + "total_connections": 112 + } +} +` + +var ( + LoadbalancerWeb = loadbalancers.LoadBalancer{ + ID: "c331058c-6a40-4144-948e-b9fb1df9db4b", + ProjectID: "54030507-44f7-473c-9342-b4d14a95f692", + Name: "web_lb", + Description: "lb config for the web tier", + VipSubnetID: "8a49c438-848f-467b-9655-ea1548708154", + VipAddress: "10.30.176.47", + VipPortID: "2a22e552-a347-44fd-b530-1f2b1b2a6735", + Flavor: "small", + Provider: "haproxy", + AdminStateUp: true, + ProvisioningStatus: "ACTIVE", + OperatingStatus: "ONLINE", + } + LoadbalancerDb = loadbalancers.LoadBalancer{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + ProjectID: "54030507-44f7-473c-9342-b4d14a95f692", + Name: "db_lb", + Description: "lb config for the db tier", + VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + VipAddress: "10.30.176.48", + VipPortID: "2bf413c8-41a9-4477-b505-333d5cbe8b55", + Flavor: "medium", + Provider: "haproxy", + AdminStateUp: true, + ProvisioningStatus: "PENDING_CREATE", + OperatingStatus: "OFFLINE", + } + LoadbalancerUpdated = loadbalancers.LoadBalancer{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + ProjectID: "54030507-44f7-473c-9342-b4d14a95f692", + Name: "NewLoadbalancerName", + Description: "lb config for the db tier", + VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + VipAddress: "10.30.176.48", + VipPortID: "2bf413c8-41a9-4477-b505-333d5cbe8b55", + Flavor: "medium", + Provider: "haproxy", + AdminStateUp: true, + ProvisioningStatus: "PENDING_CREATE", + OperatingStatus: "OFFLINE", + } + LoadbalancerStatusesTree = loadbalancers.StatusTree{ + Loadbalancer: &loadbalancers.LoadBalancer{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + Name: "db_lb", + ProvisioningStatus: "PENDING_UPDATE", + OperatingStatus: "ACTIVE", + Listeners: []listeners.Listener{{ + ID: "db902c0c-d5ff-4753-b465-668ad9656918", + Name: "db", + ProvisioningStatus: "ACTIVE", + Pools: 
[]pools.Pool{{ + ID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + Name: "db", + ProvisioningStatus: "ACTIVE", + Monitor: monitors.Monitor{ + ID: "67306cda-815d-4354-9fe4-59e09da9c3c5", + Type: "PING", + ProvisioningStatus: "ACTIVE", + }, + Members: []pools.Member{{ + ID: "2a280670-c202-4b0b-a562-34077415aabf", + Name: "db", + Address: "10.0.2.11", + ProtocolPort: 80, + ProvisioningStatus: "ACTIVE", + }}, + }}, + }}, + }, + } + LoadbalancerStatsTree = loadbalancers.Stats{ + ActiveConnections: 0, + BytesIn: 9532, + BytesOut: 22033, + RequestErrors: 46, + TotalConnections: 112, + } +) + +// HandleLoadbalancerListSuccessfully sets up the test server to respond to a loadbalancer List request. +func HandleLoadbalancerListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, LoadbalancersListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "loadbalancers": [] }`) + default: + t.Fatalf("/v2.0/lbaas/loadbalancers invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleLoadbalancerCreationSuccessfully sets up the test server to respond to a loadbalancer creation request +// with a given response. +func HandleLoadbalancerCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "loadbalancer": { + "name": "db_lb", + "vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "vip_address": "10.30.176.48", + "flavor": "medium", + "provider": "haproxy", + "admin_state_up": true + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleLoadbalancerGetSuccessfully sets up the test server to respond to a loadbalancer Get request. +func HandleLoadbalancerGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleLoadbalancerBody) + }) +} + +// HandleLoadbalancerGetStatusesTree sets up the test server to respond to a loadbalancer Get statuses tree request. +func HandleLoadbalancerGetStatusesTree(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab/status", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, GetLoadbalancerStatusesBody) + }) +} + +// HandleLoadbalancerDeletionSuccessfully sets up the test server to respond to a loadbalancer deletion request. 
+func HandleLoadbalancerDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleLoadbalancerUpdateSuccessfully sets up the test server to respond to a loadbalancer Update request. +func HandleLoadbalancerUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "loadbalancer": { + "name": "NewLoadbalancerName" + } + }`) + + fmt.Fprintf(w, PostUpdateLoadbalancerBody) + }) +} + +// HandleLoadbalancerGetStatsTree sets up the test server to respond to a loadbalancer Get stats tree request. +func HandleLoadbalancerGetStatsTree(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab/stats", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, GetLoadbalancerStatsBody) + }) +} + +// HandleLoadbalancerFailoverSuccessfully sets up the test server to respond to a loadbalancer failover request. +func HandleLoadbalancerFailoverSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab/failover", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/requests_test.go new file mode 100644 index 000000000000..b23625bebb5c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/testing/requests_test.go @@ -0,0 +1,185 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers" + fake "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListLoadbalancers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerListSuccessfully(t) + + pages := 0 + err := loadbalancers.List(fake.ServiceClient(), loadbalancers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := loadbalancers.ExtractLoadBalancers(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 loadbalancers, got %d", len(actual)) + } + th.CheckDeepEquals(t, LoadbalancerWeb, actual[0]) + th.CheckDeepEquals(t, LoadbalancerDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllLoadbalancers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerListSuccessfully(t) + + allPages, err := 
loadbalancers.List(fake.ServiceClient(), loadbalancers.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := loadbalancers.ExtractLoadBalancers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, LoadbalancerWeb, actual[0]) + th.CheckDeepEquals(t, LoadbalancerDb, actual[1]) +} + +func TestCreateLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerCreationSuccessfully(t, SingleLoadbalancerBody) + + actual, err := loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{ + Name: "db_lb", + AdminStateUp: gophercloud.Enabled, + VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + VipAddress: "10.30.176.48", + Flavor: "medium", + Provider: "haproxy", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, LoadbalancerDb, *actual) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{Name: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{Name: "foo", Description: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{Name: "foo", Description: "bar", VipAddress: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGetLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.Get(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerDb, *actual) +} + +func TestGetLoadbalancerStatusesTree(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerGetStatusesTree(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.GetStatuses(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerStatusesTree, *actual) +} + +func TestDeleteLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerDeletionSuccessfully(t) + + res := loadbalancers.Delete(fake.ServiceClient(), "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", nil) + th.AssertNoErr(t, res.Err) +} + +func TestUpdateLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.Update(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", loadbalancers.UpdateOpts{ + Name: "NewLoadbalancerName", + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerUpdated, *actual) +} + +func TestCascadingDeleteLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerDeletionSuccessfully(t) + + sc := fake.ServiceClient() + deleteOpts := loadbalancers.DeleteOpts{ + Cascade: true, + } + + query, err := deleteOpts.ToLoadBalancerDeleteQuery() + th.AssertNoErr(t, err) + th.AssertEquals(t, query, "?cascade=true") + + err = loadbalancers.Delete(sc, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", deleteOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func 
TestGetLoadbalancerStatsTree(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerGetStatsTree(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.GetStats(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerStatsTree, *actual) +} + +func TestFailoverLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerFailoverSuccessfully(t) + + res := loadbalancers.Failover(fake.ServiceClient(), "36e08a3e-a78f-4b40-a229-1e7e23eee1ab") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/urls.go new file mode 100644 index 000000000000..7b184e35f6af --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/loadbalancers/urls.go @@ -0,0 +1,31 @@ +package loadbalancers + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "loadbalancers" + statusPath = "status" + statisticsPath = "stats" + failoverPath = "failover" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func statusRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, statusPath) +} + +func statisticsRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, statisticsPath) +} + +func failoverRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, failoverPath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/doc.go new file mode 100644 index 000000000000..6ed8c8fb5fff --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/doc.go @@ -0,0 +1,69 @@ +/* +Package monitors provides information and interaction with Monitors +of the LBaaS v2 extension for the OpenStack Networking service. 
+ +Example to List Monitors + + listOpts := monitors.ListOpts{ + PoolID: "c79a4468-d788-410c-bf79-9a8ef6354852", + } + + allPages, err := monitors.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allMonitors, err := monitors.ExtractMonitors(allPages) + if err != nil { + panic(err) + } + + for _, monitor := range allMonitors { + fmt.Printf("%+v\n", monitor) + } + +Example to Create a Monitor + + createOpts := monitors.CreateOpts{ + Type: "HTTP", + Name: "db", + PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + } + + monitor, err := monitors.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Monitor + + monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" + + updateOpts := monitors.UpdateOpts{ + Name: "NewHealthmonitorName", + Delay: 3, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + } + + monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Monitor + + monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := monitors.Delete(networkClient, monitorID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/requests.go new file mode 100644 index 000000000000..c173e1c64e70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/requests.go @@ -0,0 +1,257 @@ +package monitors + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToMonitorListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Monitor attributes you want to see returned. SortKey allows you to +// sort by a particular Monitor attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + PoolID string `q:"pool_id"` + Type string `q:"type"` + Delay int `q:"delay"` + Timeout int `q:"timeout"` + MaxRetries int `q:"max_retries"` + HTTPMethod string `q:"http_method"` + URLPath string `q:"url_path"` + ExpectedCodes string `q:"expected_codes"` + AdminStateUp *bool `q:"admin_state_up"` + Status string `q:"status"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToMonitorListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToMonitorListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// health monitors. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. 
+// +// Default policy settings return only those health monitors that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToMonitorListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return MonitorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Constants that represent approved monitoring types. +const ( + TypePING = "PING" + TypeTCP = "TCP" + TypeHTTP = "HTTP" + TypeHTTPS = "HTTPS" +) + +var ( + errDelayMustGETimeout = fmt.Errorf("Delay must be greater than or equal to timeout") +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// List request. +type CreateOptsBuilder interface { + ToMonitorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // The Pool to Monitor. + PoolID string `json:"pool_id" required:"true"` + + // The type of probe, which is PING, TCP, HTTP, or HTTPS, that is + // sent by the load balancer to verify the member state. + Type string `json:"type" required:"true"` + + // The time, in seconds, between sending probes to members. + Delay int `json:"delay" required:"true"` + + // Maximum number of seconds for a Monitor to wait for a ping reply + // before it times out. The value must be less than the delay value. + Timeout int `json:"timeout" required:"true"` + + // Number of permissible ping failures before changing the member's + // status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries" required:"true"` + + // URI path that will be accessed if Monitor type is HTTP or HTTPS. + // Required for HTTP(S) types. + URLPath string `json:"url_path,omitempty"` + + // The HTTP method used for requests by the Monitor. If this attribute + // is not specified, it defaults to "GET". Required for HTTP(S) types. + HTTPMethod string `json:"http_method,omitempty"` + + // Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify + // a single status like "200", or a range like "200-202". Required for HTTP(S) + // types. + ExpectedCodes string `json:"expected_codes,omitempty"` + + // TenantID is the UUID of the project who owns the Monitor. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Monitor. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // The Name of the Monitor. + Name string `json:"name,omitempty"` + + // The administrative state of the Monitor. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMonitorCreateMap builds a request body from CreateOpts. 
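Before the create path below, a note on consuming the List call above: it returns a pagination.Pager rather than a slice, so results can either be gathered with AllPages (as the package documentation shows) or walked page by page. A minimal sketch of the page-by-page form, assuming networkClient is an already-authenticated *gophercloud.ServiceClient for this service (client construction is not part of the vendored code shown) and the pool ID is illustrative:

    listOpts := monitors.ListOpts{
        Type:   monitors.TypeHTTP,
        PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d",
    }

    err := monitors.List(networkClient, listOpts).EachPage(func(page pagination.Page) (bool, error) {
        mons, err := monitors.ExtractMonitors(page)
        if err != nil {
            return false, err
        }
        for _, m := range mons {
            fmt.Printf("monitor %s type=%s delay=%d timeout=%d\n", m.ID, m.Type, m.Delay, m.Timeout)
        }
        // Returning true continues to the next page, if any.
        return true, nil
    })
    if err != nil {
        panic(err)
    }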
+func (opts CreateOpts) ToMonitorCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "healthmonitor") + if err != nil { + return nil, err + } + + switch opts.Type { + case TypeHTTP, TypeHTTPS: + switch opts.URLPath { + case "": + return nil, fmt.Errorf("URLPath must be provided for HTTP and HTTPS") + } + switch opts.ExpectedCodes { + case "": + return nil, fmt.Errorf("ExpectedCodes must be provided for HTTP and HTTPS") + } + } + + return b, nil +} + +/* + Create is an operation which provisions a new Health Monitor. There are + different types of Monitor you can provision: PING, TCP or HTTP(S). Below + are examples of how to create each one. + + Here is an example config struct to use when creating a PING or TCP Monitor: + + CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3} + CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3} + + Here is an example config struct to use when creating a HTTP(S) Monitor: + + CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3, + HttpMethod: "HEAD", ExpectedCodes: "200", PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"} +*/ +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToMonitorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular Health Monitor based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToMonitorUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // The time, in seconds, between sending probes to members. + Delay int `json:"delay,omitempty"` + + // Maximum number of seconds for a Monitor to wait for a ping reply + // before it times out. The value must be less than the delay value. + Timeout int `json:"timeout,omitempty"` + + // Number of permissible ping failures before changing the member's + // status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries,omitempty"` + + // URI path that will be accessed if Monitor type is HTTP or HTTPS. + // Required for HTTP(S) types. + URLPath string `json:"url_path,omitempty"` + + // The HTTP method used for requests by the Monitor. If this attribute + // is not specified, it defaults to "GET". Required for HTTP(S) types. + HTTPMethod string `json:"http_method,omitempty"` + + // Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify + // a single status like "200", or a range like "200-202". Required for HTTP(S) + // types. + ExpectedCodes string `json:"expected_codes,omitempty"` + + // The Name of the Monitor. + Name string `json:"name,omitempty"` + + // The administrative state of the Monitor. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMonitorUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "healthmonitor") +} + +// Update is an operation which modifies the attributes of the specified +// Monitor. 
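One practical effect of the ToMonitorCreateMap checks above is that an HTTP or HTTPS monitor missing URLPath or ExpectedCodes is rejected locally, before any request is sent. A hedged sketch of both outcomes, with networkClient assumed as before and the pool ID illustrative:

    // Missing URLPath: Create fails inside ToMonitorCreateMap, so Err is set
    // without an HTTP round trip ("URLPath must be provided for HTTP and HTTPS").
    res := monitors.Create(networkClient, monitors.CreateOpts{
        Type:       monitors.TypeHTTP,
        PoolID:     "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d",
        Delay:      20,
        Timeout:    10,
        MaxRetries: 3,
    })
    if res.Err != nil {
        fmt.Println("rejected locally:", res.Err)
    }

    // A PING monitor has no URL-related requirements, so this proceeds to the API.
    monitor, err := monitors.Create(networkClient, monitors.CreateOpts{
        Type:       monitors.TypePING,
        PoolID:     "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d",
        Delay:      20,
        Timeout:    10,
        MaxRetries: 3,
    }).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Println("created monitor", monitor.ID)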
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToMonitorUpdateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular Monitor based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/results.go new file mode 100644 index 000000000000..ccf7d98509a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/results.go @@ -0,0 +1,153 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type PoolID struct { + ID string `json:"id"` +} + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // The unique ID for the Monitor. + ID string `json:"id"` + + // The Name of the Monitor. + Name string `json:"name"` + + // The owner of the Monitor. + ProjectID string `json:"project_id"` + + // The type of probe sent by the load balancer to verify the member state, + // which is PING, TCP, HTTP, or HTTPS. + Type string `json:"type"` + + // The time, in seconds, between sending probes to members. + Delay int `json:"delay"` + + // The maximum number of seconds for a monitor to wait for a connection to be + // established before it times out. This value must be less than the delay + // value. + Timeout int `json:"timeout"` + + // Number of allowed connection failures before changing the status of the + // member to INACTIVE. A valid value is from 1 to 10. + MaxRetries int `json:"max_retries"` + + // The HTTP method that the monitor uses for requests. + HTTPMethod string `json:"http_method"` + + // The HTTP path of the request sent by the monitor to test the health of a + // member. Must be a string beginning with a forward slash (/). + URLPath string `json:"url_path" ` + + // Expected HTTP codes for a passing HTTP(S) monitor. + ExpectedCodes string `json:"expected_codes"` + + // The administrative state of the health monitor, which is up (true) or + // down (false). + AdminStateUp bool `json:"admin_state_up"` + + // The status of the health monitor. Indicates whether the health monitor is + // operational. + Status string `json:"status"` + + // List of pools that are associated with the health monitor. 
+ Pools []PoolID `json:"pools"` + + // The provisioning status of the Monitor. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// MonitorPage is the page returned by a pager when traversing over a +// collection of health monitors. +type MonitorPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of monitors has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MonitorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"healthmonitors_links"` + } + + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MonitorPage struct is empty. +func (r MonitorPage) IsEmpty() (bool, error) { + is, err := ExtractMonitors(r) + return len(is) == 0, err +} + +// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, +// and extracts the elements into a slice of Monitor structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMonitors(r pagination.Page) ([]Monitor, error) { + var s struct { + Monitors []Monitor `json:"healthmonitors"` + } + err := (r.(MonitorPage)).ExtractInto(&s) + return s.Monitors, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a monitor. +func (r commonResult) Extract() (*Monitor, error) { + var s struct { + Monitor *Monitor `json:"healthmonitor"` + } + err := r.ExtractInto(&s) + return s.Monitor, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Monitor. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Monitor. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Monitor. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the result succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/doc.go new file mode 100644 index 000000000000..e2b6f12a9285 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/doc.go @@ -0,0 +1,2 @@ +// monitors unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/fixtures.go new file mode 100644 index 000000000000..23c097b90990 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/fixtures.go @@ -0,0 +1,215 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HealthmonitorsListBody contains the canned body of a healthmonitor list response. +const HealthmonitorsListBody = ` +{ + "healthmonitors":[ + { + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":10, + "name":"web", + "max_retries":1, + "timeout":1, + "type":"PING", + "pools": [{"id": "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d"}], + "id":"466c8345-28d8-4f84-a246-e04380b0461d" + }, + { + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":5, + "name":"db", + "expected_codes":"200", + "max_retries":2, + "http_method":"GET", + "timeout":2, + "url_path":"/", + "type":"HTTP", + "pools": [{"id": "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}], + "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7" + } + ] +} +` + +// SingleHealthmonitorBody is the canned body of a Get request on an existing healthmonitor. +const SingleHealthmonitorBody = ` +{ + "healthmonitor": { + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":5, + "name":"db", + "expected_codes":"200", + "max_retries":2, + "http_method":"GET", + "timeout":2, + "url_path":"/", + "type":"HTTP", + "pools": [{"id": "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}], + "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7" + } +} +` + +// PostUpdateHealthmonitorBody is the canned response body of a Update request on an existing healthmonitor. 
+const PostUpdateHealthmonitorBody = ` +{ + "healthmonitor": { + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":3, + "name":"NewHealthmonitorName", + "expected_codes":"301", + "max_retries":10, + "http_method":"GET", + "timeout":20, + "url_path":"/another_check", + "type":"HTTP", + "pools": [{"id": "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}], + "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7" + } +} +` + +var ( + HealthmonitorWeb = monitors.Monitor{ + AdminStateUp: true, + Name: "web", + ProjectID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 10, + MaxRetries: 1, + Timeout: 1, + Type: "PING", + ID: "466c8345-28d8-4f84-a246-e04380b0461d", + Pools: []monitors.PoolID{{ID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d"}}, + } + HealthmonitorDb = monitors.Monitor{ + AdminStateUp: true, + Name: "db", + ProjectID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 5, + ExpectedCodes: "200", + MaxRetries: 2, + Timeout: 2, + URLPath: "/", + Type: "HTTP", + HTTPMethod: "GET", + ID: "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + Pools: []monitors.PoolID{{ID: "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}}, + } + HealthmonitorUpdated = monitors.Monitor{ + AdminStateUp: true, + Name: "NewHealthmonitorName", + ProjectID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 3, + ExpectedCodes: "301", + MaxRetries: 10, + Timeout: 20, + URLPath: "/another_check", + Type: "HTTP", + HTTPMethod: "GET", + ID: "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + Pools: []monitors.PoolID{{ID: "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}}, + } +) + +// HandleHealthmonitorListSuccessfully sets up the test server to respond to a healthmonitor List request. +func HandleHealthmonitorListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, HealthmonitorsListBody) + case "556c8345-28d8-4f84-a246-e04380b0461d": + fmt.Fprintf(w, `{ "healthmonitors": [] }`) + default: + t.Fatalf("/v2.0/lbaas/healthmonitors invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleHealthmonitorCreationSuccessfully sets up the test server to respond to a healthmonitor creation request +// with a given response. +func HandleHealthmonitorCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "healthmonitor": { + "type":"HTTP", + "pool_id":"84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + "project_id":"453105b9-1754-413f-aab1-55f1af620750", + "delay":20, + "name":"db", + "timeout":10, + "max_retries":5, + "url_path":"/check", + "expected_codes":"200-299" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleHealthmonitorGetSuccessfully sets up the test server to respond to a healthmonitor Get request. 
+func HandleHealthmonitorGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors/5d4b5228-33b0-4e60-b225-9b727c1a20e7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleHealthmonitorBody) + }) +} + +// HandleHealthmonitorDeletionSuccessfully sets up the test server to respond to a healthmonitor deletion request. +func HandleHealthmonitorDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors/5d4b5228-33b0-4e60-b225-9b727c1a20e7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleHealthmonitorUpdateSuccessfully sets up the test server to respond to a healthmonitor Update request. +func HandleHealthmonitorUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors/5d4b5228-33b0-4e60-b225-9b727c1a20e7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "healthmonitor": { + "name": "NewHealthmonitorName", + "delay": 3, + "timeout": 20, + "max_retries": 10, + "url_path": "/another_check", + "expected_codes": "301" + } + }`) + + fmt.Fprintf(w, PostUpdateHealthmonitorBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/requests_test.go new file mode 100644 index 000000000000..d850fe199b2e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/testing/requests_test.go @@ -0,0 +1,154 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" + fake "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListHealthmonitors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorListSuccessfully(t) + + pages := 0 + err := monitors.List(fake.ServiceClient(), monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := monitors.ExtractMonitors(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 healthmonitors, got %d", len(actual)) + } + th.CheckDeepEquals(t, HealthmonitorWeb, actual[0]) + th.CheckDeepEquals(t, HealthmonitorDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllHealthmonitors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorListSuccessfully(t) + + allPages, err := monitors.List(fake.ServiceClient(), monitors.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := monitors.ExtractMonitors(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, HealthmonitorWeb, actual[0]) + th.CheckDeepEquals(t, HealthmonitorDb, actual[1]) +} + +func TestCreateHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + 
HandleHealthmonitorCreationSuccessfully(t, SingleHealthmonitorBody) + + actual, err := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{ + Type: "HTTP", + Name: "db", + PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + ProjectID: "453105b9-1754-413f-aab1-55f1af620750", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, HealthmonitorDb, *actual) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = monitors.Create(fake.ServiceClient(), monitors.CreateOpts{Type: monitors.TypeHTTP}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGetHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := monitors.Get(client, "5d4b5228-33b0-4e60-b225-9b727c1a20e7").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, HealthmonitorDb, *actual) +} + +func TestDeleteHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorDeletionSuccessfully(t) + + res := monitors.Delete(fake.ServiceClient(), "5d4b5228-33b0-4e60-b225-9b727c1a20e7") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := monitors.Update(client, "5d4b5228-33b0-4e60-b225-9b727c1a20e7", monitors.UpdateOpts{ + Name: "NewHealthmonitorName", + Delay: 3, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, HealthmonitorUpdated, *actual) +} + +func TestDelayMustBeGreaterOrEqualThanTimeout(t *testing.T) { + _, err := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{ + Type: "HTTP", + PoolID: "d459f7d8-c6ee-439d-8713-d3fc08aeed8d", + Delay: 1, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } + + _, err = monitors.Update(fake.ServiceClient(), "453105b9-1754-413f-aab1-55f1af620750", monitors.UpdateOpts{ + Delay: 1, + Timeout: 10, + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/urls.go new file mode 100644 index 000000000000..a222e52a93dd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors/urls.go @@ -0,0 +1,16 @@ +package monitors + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "healthmonitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/doc.go new file mode 100644 index 000000000000..2d57ed439385 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/doc.go @@ -0,0 +1,124 @@ +/* +Package pools provides information and interaction with Pools and +Members of the LBaaS v2 extension for the OpenStack Networking service. + +Example to List Pools + + listOpts := pools.ListOpts{ + LoadbalancerID: "c79a4468-d788-410c-bf79-9a8ef6354852", + } + + allPages, err := pools.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPools, err := pools.ExtractPools(allPages) + if err != nil { + panic(err) + } + + for _, pool := range allPools { + fmt.Printf("%+v\n", pool) + } + +Example to Create a Pool + + createOpts := pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + } + + pool, err := pools.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Pool + + poolID := "d67d56a6-4a86-4688-a282-f46444705c64" + + updateOpts := pools.UpdateOpts{ + Name: "new-name", + } + + pool, err := pools.Update(networkClient, poolID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Pool + + poolID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := pools.Delete(networkClient, poolID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List Pool Members + + poolID := "d67d56a6-4a86-4688-a282-f46444705c64" + + listOpts := pools.ListMembersOpts{ + ProtocolPort: 80, + } + + allPages, err := pools.ListMembers(networkClient, poolID, listOpts).AllPages() + if err != nil { + panic(err) + } + + allMembers, err := pools.ExtractMembers(allPages) + if err != nil { + panic(err) + } + + for _, member := range allMembers { + fmt.Printf("%+v\n", member) + } + +Example to Create a Member + + poolID := "d67d56a6-4a86-4688-a282-f46444705c64" + + createOpts := pools.CreateMemberOpts{ + Name: "db", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + Address: "10.0.2.11", + ProtocolPort: 80, + Weight: 10, + } + + member, err := pools.CreateMember(networkClient, poolID, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Member + + poolID := "d67d56a6-4a86-4688-a282-f46444705c64" + memberID := "64dba99f-8af8-4200-8882-e32a0660f23e" + + updateOpts := pools.UpdateMemberOpts{ + Name: "new-name", + Weight: 4, + } + + member, err := pools.UpdateMember(networkClient, poolID, memberID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Member + + poolID := "d67d56a6-4a86-4688-a282-f46444705c64" + memberID := "64dba99f-8af8-4200-8882-e32a0660f23e" + + err := pools.DeleteMember(networkClient, poolID, memberID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package pools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/requests.go new file mode 100644 index 000000000000..538b2f1b2ac5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/requests.go @@ -0,0 +1,346 @@ +package pools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPoolListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API.
Filtering is achieved by passing in struct field values that map to +// the Pool attributes you want to see returned. SortKey allows you to +// sort by a particular Pool attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + LBMethod string `q:"lb_algorithm"` + Protocol string `q:"protocol"` + ProjectID string `q:"project_id"` + AdminStateUp *bool `q:"admin_state_up"` + Name string `q:"name"` + ID string `q:"id"` + LoadbalancerID string `q:"loadbalancer_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPoolListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPoolListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// pools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those pools that are owned by the +// project who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPoolListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type LBMethod string +type Protocol string + +// Supported attributes for create/update operations. +const ( + LBMethodRoundRobin LBMethod = "ROUND_ROBIN" + LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" + LBMethodSourceIp LBMethod = "SOURCE_IP" + + ProtocolTCP Protocol = "TCP" + ProtocolHTTP Protocol = "HTTP" + ProtocolHTTPS Protocol = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // The algorithm used to distribute load between the members of the pool. The + // current specification supports LBMethodRoundRobin, LBMethodLeastConnections + // and LBMethodSourceIp as valid values for this attribute. + LBMethod LBMethod `json:"lb_algorithm" required:"true"` + + // The protocol used by the pool members, you can use either + // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. + Protocol Protocol `json:"protocol" required:"true"` + + // The Loadbalancer on which the members of the pool will be associated with. + // Note: one of LoadbalancerID or ListenerID must be provided. + LoadbalancerID string `json:"loadbalancer_id,omitempty" xor:"ListenerID"` + + // The Listener on which the members of the pool will be associated with. + // Note: one of LoadbalancerID or ListenerID must be provided. + ListenerID string `json:"listener_id,omitempty" xor:"LoadbalancerID"` + + // ProjectID is the UUID of the project who owns the Pool. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // Name of the pool. + Name string `json:"name,omitempty"` + + // Human-readable description for the pool. 
+ Description string `json:"description,omitempty"` + + // Persistence is the session persistence of the pool. + // Omit this field to prevent session persistence. + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToPoolCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToPoolCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// load balancer pool. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPoolCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPoolUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Name of the pool. + Name string `json:"name,omitempty"` + + // Human-readable description for the pool. + Description string `json:"description,omitempty"` + + // The algorithm used to distribute load between the members of the pool. The + // current specification supports LBMethodRoundRobin, LBMethodLeastConnections + // and LBMethodSourceIp as valid values for this attribute. + LBMethod LBMethod `json:"lb_algorithm,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToPoolUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToPoolUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Update allows pools to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPoolUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular pool based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// ListMemberOptsBuilder allows extensions to add additional parameters to the +// ListMembers request. +type ListMembersOptsBuilder interface { + ToMembersListQuery() (string, error) +} + +// ListMembersOpts allows the filtering and sorting of paginated collections +// through the API. Filtering is achieved by passing in struct field values +// that map to the Member attributes you want to see returned. SortKey allows +// you to sort by a particular Member attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. 
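Returning briefly to CreateOpts above: it ties into the SessionPersistence type defined in this package's results.go (shown further below); supplying a *SessionPersistence pins a client's requests to one member, while omitting the field leaves persistence off. A minimal sketch, with networkClient assumed as in the earlier examples and an illustrative listener ID:

    createOpts := pools.CreateOpts{
        LBMethod:   pools.LBMethodLeastConnections,
        Protocol:   pools.ProtocolHTTP,
        Name:       "web-pool",
        ListenerID: "2a280670-c202-4b0b-a562-34077415aabf",
        // HTTP_COOKIE persistence: the load balancer issues a cookie on the
        // first response and routes matching requests to the same member.
        Persistence: &pools.SessionPersistence{Type: "HTTP_COOKIE"},
    }

    pool, err := pools.Create(networkClient, createOpts).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Printf("pool %s persistence=%s\n", pool.ID, pool.Persistence.Type)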
+type ListMembersOpts struct { + Name string `q:"name"` + Weight int `q:"weight"` + AdminStateUp *bool `q:"admin_state_up"` + ProjectID string `q:"project_id"` + Address string `q:"address"` + ProtocolPort int `q:"protocol_port"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToMemberListQuery formats a ListOpts into a query string. +func (opts ListMembersOpts) ToMembersListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListMembers returns a Pager which allows you to iterate over a collection of +// members. It accepts a ListMembersOptsBuilder, which allows you to filter and +// sort the returned collection for greater efficiency. +// +// Default policy settings return only those members that are owned by the +// project who submits the request, unless an admin user submits the request. +func ListMembers(c *gophercloud.ServiceClient, poolID string, opts ListMembersOptsBuilder) pagination.Pager { + url := memberRootURL(c, poolID) + if opts != nil { + query, err := opts.ToMembersListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return MemberPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateMemberOptsBuilder allows extensions to add additional parameters to the +// CreateMember request. +type CreateMemberOptsBuilder interface { + ToMemberCreateMap() (map[string]interface{}, error) +} + +// CreateMemberOpts is the common options struct used in this package's CreateMember +// operation. +type CreateMemberOpts struct { + // The IP address of the member to receive traffic from the load balancer. + Address string `json:"address" required:"true"` + + // The port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // Name of the Member. + Name string `json:"name,omitempty"` + + // ProjectID is the UUID of the project who owns the Member. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // A positive integer value that indicates the relative portion of traffic + // that this member should receive from the pool. For example, a member with + // a weight of 10 receives five times as much traffic as a member with a + // weight of 2. + Weight int `json:"weight,omitempty"` + + // If you omit this parameter, LBaaS uses the vip_subnet_id parameter value + // for the subnet UUID. + SubnetID string `json:"subnet_id,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMemberCreateMap builds a request body from CreateMemberOpts. +func (opts CreateMemberOpts) ToMemberCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "member") +} + +// CreateMember will create and associate a Member with a particular Pool. +func CreateMember(c *gophercloud.ServiceClient, poolID string, opts CreateMemberOpts) (r CreateMemberResult) { + b, err := opts.ToMemberCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(memberRootURL(c, poolID), b, &r.Body, nil) + return +} + +// GetMember retrieves a particular Pool Member based on its unique ID. 
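The member helpers above and below compose naturally for maintenance work. As a sketch (identifiers are illustrative, networkClient assumed as before), a backend can be taken out of rotation by finding it with ListMembers and clearing admin_state_up through UpdateMember:

    poolID := "c3741b06-df4d-4715-b142-276b6bce75ab"
    down := false

    allPages, err := pools.ListMembers(networkClient, poolID, pools.ListMembersOpts{
        Address: "10.0.2.11",
    }).AllPages()
    if err != nil {
        panic(err)
    }

    members, err := pools.ExtractMembers(allPages)
    if err != nil {
        panic(err)
    }

    for _, m := range members {
        // Disable the member; the rest of the pool keeps serving traffic.
        _, err := pools.UpdateMember(networkClient, poolID, m.ID, pools.UpdateMemberOpts{
            AdminStateUp: &down,
        }).Extract()
        if err != nil {
            panic(err)
        }
    }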
+func GetMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r GetMemberResult) { + _, r.Err = c.Get(memberResourceURL(c, poolID, memberID), &r.Body, nil) + return +} + +// UpdateMemberOptsBuilder allows extensions to add additional parameters to the +// List request. +type UpdateMemberOptsBuilder interface { + ToMemberUpdateMap() (map[string]interface{}, error) +} + +// UpdateMemberOpts is the common options struct used in this package's Update +// operation. +type UpdateMemberOpts struct { + // Name of the Member. + Name string `json:"name,omitempty"` + + // A positive integer value that indicates the relative portion of traffic + // that this member should receive from the pool. For example, a member with + // a weight of 10 receives five times as much traffic as a member with a + // weight of 2. + Weight int `json:"weight,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMemberUpdateMap builds a request body from UpdateMemberOpts. +func (opts UpdateMemberOpts) ToMemberUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "member") +} + +// Update allows Member to be updated. +func UpdateMember(c *gophercloud.ServiceClient, poolID string, memberID string, opts UpdateMemberOptsBuilder) (r UpdateMemberResult) { + b, err := opts.ToMemberUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(memberResourceURL(c, poolID, memberID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// DisassociateMember will remove and disassociate a Member from a particular +// Pool. +func DeleteMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r DeleteMemberResult) { + _, r.Err = c.Delete(memberResourceURL(c, poolID, memberID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/results.go new file mode 100644 index 000000000000..fe0d93325f44 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/results.go @@ -0,0 +1,281 @@ +package pools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/monitors" + "github.com/gophercloud/gophercloud/pagination" +) + +// SessionPersistence represents the session persistence feature of the load +// balancing service. It attempts to force connections or requests in the same +// session to be processed by the same member as long as it is ative. Three +// types of persistence are supported: +// +// SOURCE_IP: With this mode, all connections originating from the same source +// IP address, will be handled by the same Member of the Pool. +// HTTP_COOKIE: With this persistence mode, the load balancing function will +// create a cookie on the first request from a client. Subsequent +// requests containing the same cookie value will be handled by +// the same Member of the Pool. +// APP_COOKIE: With this persistence mode, the load balancing function will +// rely on a cookie established by the backend application. All +// requests carrying the same cookie value will be handled by the +// same Member of the Pool. +type SessionPersistence struct { + // The type of persistence mode. + Type string `json:"type"` + + // Name of cookie if persistence mode is set appropriately. 
+ CookieName string `json:"cookie_name,omitempty"` +} + +// LoadBalancerID represents a load balancer. +type LoadBalancerID struct { + ID string `json:"id"` +} + +// ListenerID represents a listener. +type ListenerID struct { + ID string `json:"id"` +} + +// Pool represents a logical set of devices, such as web servers, that you +// group together to receive and process traffic. The load balancing function +// chooses a Member of the Pool according to the configured load balancing +// method to handle the new requests or connections received on the VIP address. +type Pool struct { + // The load-balancer algorithm, which is round-robin, least-connections, and + // so on. This value, which must be supported, is dependent on the provider. + // Round-robin must be supported. + LBMethod string `json:"lb_algorithm"` + + // The protocol of the Pool, which is TCP, HTTP, or HTTPS. + Protocol string `json:"protocol"` + + // Description for the Pool. + Description string `json:"description"` + + // A list of listeners objects IDs. + Listeners []ListenerID `json:"listeners"` //[]map[string]interface{} + + // A list of member objects IDs. + Members []Member `json:"members"` + + // The ID of associated health monitor. + MonitorID string `json:"healthmonitor_id"` + + // The network on which the members of the Pool will be located. Only members + // that are on this network can be added to the Pool. + SubnetID string `json:"subnet_id"` + + // Owner of the Pool. + ProjectID string `json:"project_id"` + + // The administrative state of the Pool, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Pool name. Does not have to be unique. + Name string `json:"name"` + + // The unique ID for the Pool. + ID string `json:"id"` + + // A list of load balancer objects IDs. + Loadbalancers []LoadBalancerID `json:"loadbalancers"` + + // Indicates whether connections in the same session will be processed by the + // same Pool member or not. + Persistence SessionPersistence `json:"session_persistence"` + + // The load balancer provider. + Provider string `json:"provider"` + + // The Monitor associated with this Pool. + Monitor monitors.Monitor `json:"healthmonitor"` + + // The provisioning status of the pool. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// PoolPage is the page returned by a pager when traversing over a +// collection of pools. +type PoolPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of pools has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r PoolPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"pools_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PoolPage struct is empty. +func (r PoolPage) IsEmpty() (bool, error) { + is, err := ExtractPools(r) + return len(is) == 0, err +} + +// ExtractPools accepts a Page struct, specifically a PoolPage struct, +// and extracts the elements into a slice of Pool structs. In other words, +// a generic collection is mapped into a relevant slice. 
+func ExtractPools(r pagination.Page) ([]Pool, error) { + var s struct { + Pools []Pool `json:"pools"` + } + err := (r.(PoolPage)).ExtractInto(&s) + return s.Pools, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a pool. +func (r commonResult) Extract() (*Pool, error) { + var s struct { + Pool *Pool `json:"pool"` + } + err := r.ExtractInto(&s) + return s.Pool, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret the result as a Pool. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret the result as a Pool. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret the result as a Pool. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Member represents the application running on a backend server. +type Member struct { + // Name of the Member. + Name string `json:"name"` + + // Weight of Member. + Weight int `json:"weight"` + + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Owner of the Member. + ProjectID string `json:"project_id"` + + // Parameter value for the subnet UUID. + SubnetID string `json:"subnet_id"` + + // The Pool to which the Member belongs. + PoolID string `json:"pool_id"` + + // The IP address of the Member. + Address string `json:"address"` + + // The port on which the application is hosted. + ProtocolPort int `json:"protocol_port"` + + // The unique ID for the Member. + ID string `json:"id"` + + // The provisioning status of the pool. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// MemberPage is the page returned by a pager when traversing over a +// collection of Members in a Pool. +type MemberPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of members has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MemberPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"members_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MemberPage struct is empty. +func (r MemberPage) IsEmpty() (bool, error) { + is, err := ExtractMembers(r) + return len(is) == 0, err +} + +// ExtractMembers accepts a Page struct, specifically a MemberPage struct, +// and extracts the elements into a slice of Members structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMembers(r pagination.Page) ([]Member, error) { + var s struct { + Members []Member `json:"members"` + } + err := (r.(MemberPage)).ExtractInto(&s) + return s.Members, err +} + +type commonMemberResult struct { + gophercloud.Result +} + +// ExtractMember is a function that accepts a result and extracts a member. 
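Because Pool carries its member list, health monitor ID, and parent load balancer IDs directly, a single Get followed by Extract is usually enough to inspect a pool's topology. A short sketch under the same assumptions as the earlier examples, with an illustrative pool ID:

    pool, err := pools.Get(networkClient, "c3741b06-df4d-4715-b142-276b6bce75ab").Extract()
    if err != nil {
        panic(err)
    }

    fmt.Printf("pool %q: %s over %s, monitor %s\n", pool.Name, pool.LBMethod, pool.Protocol, pool.MonitorID)
    for _, lb := range pool.Loadbalancers {
        fmt.Println("  attached to load balancer", lb.ID)
    }
    for _, m := range pool.Members {
        fmt.Printf("  member %s %s:%d weight=%d\n", m.ID, m.Address, m.ProtocolPort, m.Weight)
    }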
+func (r commonMemberResult) Extract() (*Member, error) { + var s struct { + Member *Member `json:"member"` + } + err := r.ExtractInto(&s) + return s.Member, err +} + +// CreateMemberResult represents the result of a CreateMember operation. +// Call its Extract method to interpret it as a Member. +type CreateMemberResult struct { + commonMemberResult +} + +// GetMemberResult represents the result of a GetMember operation. +// Call its Extract method to interpret it as a Member. +type GetMemberResult struct { + commonMemberResult +} + +// UpdateMemberResult represents the result of an UpdateMember operation. +// Call its Extract method to interpret it as a Member. +type UpdateMemberResult struct { + commonMemberResult +} + +// DeleteMemberResult represents the result of a DeleteMember operation. +// Call its ExtractErr method to determine if the request succeeded or failed. +type DeleteMemberResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/doc.go new file mode 100644 index 000000000000..46e335f3f2d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/doc.go @@ -0,0 +1,2 @@ +// pools unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/fixtures.go new file mode 100644 index 000000000000..9d929525d628 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/fixtures.go @@ -0,0 +1,388 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// PoolsListBody contains the canned body of a pool list response. +const PoolsListBody = ` +{ + "pools":[ + { + "lb_algorithm":"ROUND_ROBIN", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "466c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "53306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"72741b06-df4d-4715-b142-276b6bce75ab", + "name":"web", + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + }, + { + "lb_algorithm":"LEAST_CONNECTION", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "5f6c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "67306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"c3741b06-df4d-4715-b142-276b6bce75ab", + "name":"db", + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + } + ] +} +` + +// SinglePoolBody is the canned body of a Get request on an existing pool. 
+const SinglePoolBody = ` +{ + "pool": { + "lb_algorithm":"LEAST_CONNECTION", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "5f6c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "67306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"c3741b06-df4d-4715-b142-276b6bce75ab", + "name":"db", + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + } +} +` + +// PostUpdatePoolBody is the canned response body of a Update request on an existing pool. +const PostUpdatePoolBody = ` +{ + "pool": { + "lb_algorithm":"LEAST_CONNECTION", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "5f6c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "67306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"c3741b06-df4d-4715-b142-276b6bce75ab", + "name":"db", + "admin_state_up":true, + "project_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + } +} +` + +var ( + PoolWeb = pools.Pool{ + LBMethod: "ROUND_ROBIN", + Protocol: "HTTP", + Description: "", + MonitorID: "466c8345-28d8-4f84-a246-e04380b0461d", + ProjectID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "web", + Members: []pools.Member{{ID: "53306cda-815d-4354-9fe4-59e09da9c3c5"}}, + ID: "72741b06-df4d-4715-b142-276b6bce75ab", + Loadbalancers: []pools.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Listeners: []pools.ListenerID{{ID: "2a280670-c202-4b0b-a562-34077415aabf"}}, + Provider: "haproxy", + } + PoolDb = pools.Pool{ + LBMethod: "LEAST_CONNECTION", + Protocol: "HTTP", + Description: "", + MonitorID: "5f6c8345-28d8-4f84-a246-e04380b0461d", + ProjectID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "db", + Members: []pools.Member{{ID: "67306cda-815d-4354-9fe4-59e09da9c3c5"}}, + ID: "c3741b06-df4d-4715-b142-276b6bce75ab", + Loadbalancers: []pools.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Listeners: []pools.ListenerID{{ID: "2a280670-c202-4b0b-a562-34077415aabf"}}, + Provider: "haproxy", + } + PoolUpdated = pools.Pool{ + LBMethod: "LEAST_CONNECTION", + Protocol: "HTTP", + Description: "", + MonitorID: "5f6c8345-28d8-4f84-a246-e04380b0461d", + ProjectID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "db", + Members: []pools.Member{{ID: "67306cda-815d-4354-9fe4-59e09da9c3c5"}}, + ID: "c3741b06-df4d-4715-b142-276b6bce75ab", + Loadbalancers: []pools.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Listeners: []pools.ListenerID{{ID: "2a280670-c202-4b0b-a562-34077415aabf"}}, + Provider: "haproxy", + } +) + +// HandlePoolListSuccessfully sets up the test server to respond to a pool List request. 
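+//
+// Typical use from a test (mirrors TestListAllPools in requests_test.go;
+// th.SetupHTTP/TeardownHTTP manage the shared test server):
+//
+//	th.SetupHTTP()
+//	defer th.TeardownHTTP()
+//	HandlePoolListSuccessfully(t)
+//	allPages, err := pools.List(fake.ServiceClient(), pools.ListOpts{}).AllPages()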
+func HandlePoolListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, PoolsListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "pools": [] }`) + default: + t.Fatalf("/v2.0/lbaas/pools invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandlePoolCreationSuccessfully sets up the test server to respond to a pool creation request +// with a given response. +func HandlePoolCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "pool": { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "name": "Example pool", + "project_id": "2ffc6e22aae24e4795f87155d24c896f", + "loadbalancer_id": "79e05663-7f03-45d2-a092-8b94062f22ab" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandlePoolGetSuccessfully sets up the test server to respond to a pool Get request. +func HandlePoolGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/c3741b06-df4d-4715-b142-276b6bce75ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SinglePoolBody) + }) +} + +// HandlePoolDeletionSuccessfully sets up the test server to respond to a pool deletion request. +func HandlePoolDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/c3741b06-df4d-4715-b142-276b6bce75ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandlePoolUpdateSuccessfully sets up the test server to respond to a pool Update request. +func HandlePoolUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/c3741b06-df4d-4715-b142-276b6bce75ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "pool": { + "name": "NewPoolName", + "lb_algorithm": "LEAST_CONNECTIONS" + } + }`) + + fmt.Fprintf(w, PostUpdatePoolBody) + }) +} + +// MembersListBody contains the canned body of a member list response. +const MembersListBody = ` +{ + "members":[ + { + "id": "2a280670-c202-4b0b-a562-34077415aabf", + "address": "10.0.2.10", + "weight": 5, + "name": "web", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "project_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":true, + "protocol_port": 80 + }, + { + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "project_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":false, + "protocol_port": 80 + } + ] +} +` + +// SingleMemberBody is the canned body of a Get request on an existing member. 
+const SingleMemberBody = ` +{ + "member": { + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "project_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":false, + "protocol_port": 80 + } +} +` + +// PostUpdateMemberBody is the canned response body of a Update request on an existing member. +const PostUpdateMemberBody = ` +{ + "member": { + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "project_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":false, + "protocol_port": 80 + } +} +` + +var ( + MemberWeb = pools.Member{ + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + ProjectID: "2ffc6e22aae24e4795f87155d24c896f", + AdminStateUp: true, + Name: "web", + ID: "2a280670-c202-4b0b-a562-34077415aabf", + Address: "10.0.2.10", + Weight: 5, + ProtocolPort: 80, + } + MemberDb = pools.Member{ + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + ProjectID: "2ffc6e22aae24e4795f87155d24c896f", + AdminStateUp: false, + Name: "db", + ID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + Address: "10.0.2.11", + Weight: 10, + ProtocolPort: 80, + } + MemberUpdated = pools.Member{ + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + ProjectID: "2ffc6e22aae24e4795f87155d24c896f", + AdminStateUp: false, + Name: "db", + ID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + Address: "10.0.2.11", + Weight: 10, + ProtocolPort: 80, + } +) + +// HandleMemberListSuccessfully sets up the test server to respond to a member List request. +func HandleMemberListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, MembersListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "members": [] }`) + default: + t.Fatalf("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleMemberCreationSuccessfully sets up the test server to respond to a member creation request +// with a given response. +func HandleMemberCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "member": { + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "project_id": "2ffc6e22aae24e4795f87155d24c896f", + "protocol_port": 80 + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleMemberGetSuccessfully sets up the test server to respond to a member Get request. 
+func HandleMemberGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members/2a280670-c202-4b0b-a562-34077415aabf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleMemberBody) + }) +} + +// HandleMemberDeletionSuccessfully sets up the test server to respond to a member deletion request. +func HandleMemberDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members/2a280670-c202-4b0b-a562-34077415aabf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleMemberUpdateSuccessfully sets up the test server to respond to a member Update request. +func HandleMemberUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members/2a280670-c202-4b0b-a562-34077415aabf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "member": { + "name": "newMemberName", + "weight": 4 + } + }`) + + fmt.Fprintf(w, PostUpdateMemberBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/requests_test.go new file mode 100644 index 000000000000..33c5c736423e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/testing/requests_test.go @@ -0,0 +1,262 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools" + fake "github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListPools(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolListSuccessfully(t) + + pages := 0 + err := pools.List(fake.ServiceClient(), pools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := pools.ExtractPools(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 pools, got %d", len(actual)) + } + th.CheckDeepEquals(t, PoolWeb, actual[0]) + th.CheckDeepEquals(t, PoolDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllPools(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolListSuccessfully(t) + + allPages, err := pools.List(fake.ServiceClient(), pools.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := pools.ExtractPools(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, PoolWeb, actual[0]) + th.CheckDeepEquals(t, PoolDb, actual[1]) +} + +func TestCreatePool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolCreationSuccessfully(t, SinglePoolBody) + + actual, err := pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + ProjectID: 
"2ffc6e22aae24e4795f87155d24c896f", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, PoolDb, *actual) +} + +func TestGetPool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.Get(client, "c3741b06-df4d-4715-b142-276b6bce75ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, PoolDb, *actual) +} + +func TestDeletePool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolDeletionSuccessfully(t) + + res := pools.Delete(fake.ServiceClient(), "c3741b06-df4d-4715-b142-276b6bce75ab") + th.AssertNoErr(t, res.Err) +} + +func TestUpdatePool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.Update(client, "c3741b06-df4d-4715-b142-276b6bce75ab", pools.UpdateOpts{ + Name: "NewPoolName", + LBMethod: pools.LBMethodLeastConnections, + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, PoolUpdated, *actual) +} + +func TestRequiredPoolCreateOpts(t *testing.T) { + res := pools.Create(fake.ServiceClient(), pools.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethod("invalid"), + Protocol: pools.ProtocolHTTPS, + LoadbalancerID: "69055154-f603-4a28-8951-7cc2d9e54a9a", + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + + res = pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: pools.Protocol("invalid"), + LoadbalancerID: "69055154-f603-4a28-8951-7cc2d9e54a9a", + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + + res = pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: pools.ProtocolHTTPS, + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } +} + +func TestListMembers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberListSuccessfully(t) + + pages := 0 + err := pools.ListMembers(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.ListMembersOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := pools.ExtractMembers(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 members, got %d", len(actual)) + } + th.CheckDeepEquals(t, MemberWeb, actual[0]) + th.CheckDeepEquals(t, MemberDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllMembers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberListSuccessfully(t) + + allPages, err := pools.ListMembers(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.ListMembersOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := pools.ExtractMembers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, MemberWeb, actual[0]) + th.CheckDeepEquals(t, MemberDb, actual[1]) +} + +func TestCreateMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberCreationSuccessfully(t, SingleMemberBody) + + actual, err := pools.CreateMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.CreateMemberOpts{ + Name: 
"db", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + ProjectID: "2ffc6e22aae24e4795f87155d24c896f", + Address: "10.0.2.11", + ProtocolPort: 80, + Weight: 10, + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, MemberDb, *actual) +} + +func TestRequiredMemberCreateOpts(t *testing.T) { + res := pools.CreateMember(fake.ServiceClient(), "", pools.CreateMemberOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = pools.CreateMember(fake.ServiceClient(), "", pools.CreateMemberOpts{Address: "1.2.3.4", ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + res = pools.CreateMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.CreateMemberOpts{ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + res = pools.CreateMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.CreateMemberOpts{Address: "1.2.3.4"}) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } +} + +func TestGetMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.GetMember(client, "332abe93-f488-41ba-870b-2ac66be7f853", "2a280670-c202-4b0b-a562-34077415aabf").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, MemberDb, *actual) +} + +func TestDeleteMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberDeletionSuccessfully(t) + + res := pools.DeleteMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "2a280670-c202-4b0b-a562-34077415aabf") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.UpdateMember(client, "332abe93-f488-41ba-870b-2ac66be7f853", "2a280670-c202-4b0b-a562-34077415aabf", pools.UpdateMemberOpts{ + Name: "newMemberName", + Weight: 4, + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, MemberUpdated, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/urls.go new file mode 100644 index 000000000000..bceca67707f2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/pools/urls.go @@ -0,0 +1,25 @@ +package pools + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "pools" + memberPath = "members" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func memberRootURL(c *gophercloud.ServiceClient, poolId string) string { + return c.ServiceURL(rootPath, resourcePath, poolId, memberPath) +} + +func memberResourceURL(c *gophercloud.ServiceClient, poolID string, memeberID string) string { + return c.ServiceURL(rootPath, resourcePath, poolID, memberPath, memeberID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper/client.go b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper/client.go new file mode 100644 index 000000000000..7e1d917280cf --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/loadbalancer/v2/testhelper/client.go @@ -0,0 +1,14 @@ +package common + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const TokenID = client.TokenID + +func ServiceClient() *gophercloud.ServiceClient { + sc := client.ServiceClient() + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/doc.go new file mode 100644 index 000000000000..1d9b1d0e8377 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/doc.go @@ -0,0 +1,53 @@ +/* +Package claims provides information and interaction with the Zaqar API +claims resource for the OpenStack Messaging service. + +Example to Create a Claim on a specified Zaqar queue + + createOpts := claims.CreateOpts{ + TTL: 60, + Grace: 120, + Limit: 20, + } + + queueName := "my_queue" + + messages, err := claims.Create(messagingClient, queueName, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to get a claim for a specified Zaqar queue + + queueName := "my_queue" + claimID := "123456789012345678" + + claim, err := claims.Get(messagingClient, queueName, claimID).Extract() + if err != nil { + panic(err) + } + +Example to update a claim for a specified Zaqar queue + + updateOpts := claims.UpdateOpts{ + TTL: 600 + Grace: 1200 + } + + queueName := "my_queue" + + err := claims.Update(messagingClient, queueName, claimID, updateOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to delete a claim for a specified Zaqar queue + + queueName := "my_queue" + + err := claims.Delete(messagingClient, queueName, claimID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package claims diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/requests.go new file mode 100644 index 000000000000..21492e527d48 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/requests.go @@ -0,0 +1,120 @@ +package claims + +import ( + "net/http" + + "github.com/gophercloud/gophercloud" +) + +// CreateOptsBuilder Builder. +type CreateOptsBuilder interface { + ToClaimCreateRequest() (map[string]interface{}, string, error) +} + +// CreateOpts params to be used with Create. +type CreateOpts struct { + // Sets the TTL for the claim. When the claim expires un-deleted messages will be able to be claimed again. + TTL int `json:"ttl,omitempty"` + + // Sets the Grace period for the claimed messages. The server extends the lifetime of claimed messages + // to be at least as long as the lifetime of the claim itself, plus the specified grace period. + Grace int `json:"grace,omitempty"` + + // Set the limit of messages returned by create. + Limit int `q:"limit" json:"-"` +} + +// ToClaimCreateRequest assembles a body and URL for a Create request based on +// the contents of a CreateOpts. +func (opts CreateOpts) ToClaimCreateRequest() (map[string]interface{}, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, q.String(), err + } + + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return b, "", err + } + return b, q.String(), err +} + +// Create creates a Claim that claims messages on a specified queue. 
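+//
+// Illustrative use only (messagingClient and the queue name are assumed
+// placeholders for a configured Zaqar service client):
+//
+//	opts := claims.CreateOpts{TTL: 60, Grace: 120, Limit: 20}
+//	claimed, err := claims.Create(messagingClient, "my_queue", opts).Extract()
+//	if err != nil {
+//		// handle error
+//	}
+//	// claimed is a []claims.Messages.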
+func Create(client *gophercloud.ServiceClient, queueName string, opts CreateOptsBuilder) (r CreateResult) { + b, q, err := opts.ToClaimCreateRequest() + if err != nil { + r.Err = err + return + } + + url := createURL(client, queueName) + if q != "" { + url += q + } + + var resp *http.Response + resp, r.Err = client.Post(url, b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{201, 204}, + }) + // If the Claim has no content return an empty CreateResult + if resp.StatusCode == 204 { + r.Body = CreateResult{} + } else { + r.Body = resp.Body + } + return +} + +// Get queries the specified claim for the specified queue. +func Get(client *gophercloud.ServiceClient, queueName string, claimID string) (r GetResult) { + _, r.Err = client.Get(getURL(client, queueName, claimID), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToClaimUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts implements UpdateOpts. +type UpdateOpts struct { + // Update the TTL for the specified Claim. + TTL int `json:"ttl,omitempty"` + + // Update the grace period for Messages in a specified Claim. + Grace int `json:"grace,omitempty"` +} + +// ToClaimUpdateMap assembles a request body based on the contents of +// UpdateOpts. +func (opts UpdateOpts) ToClaimUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + return b, nil +} + +// Update will update the options for a specified claim. +func Update(client *gophercloud.ServiceClient, queueName string, claimID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToClaimUpdateMap() + if err != nil { + r.Err = err + return r + } + _, r.Err = client.Patch(updateURL(client, queueName, claimID), &b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} + +// Delete will delete a Claim for a specified Queue. +func Delete(client *gophercloud.ServiceClient, queueName string, claimID string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, queueName, claimID), &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/results.go new file mode 100644 index 000000000000..ec43e582c0ac --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/results.go @@ -0,0 +1,52 @@ +package claims + +import "github.com/gophercloud/gophercloud" + +func (r CreateResult) Extract() ([]Messages, error) { + var s struct { + Messages []Messages `json:"messages"` + } + err := r.ExtractInto(&s) + return s.Messages, err +} + +func (r GetResult) Extract() (*Claim, error) { + var s *Claim + err := r.ExtractInto(&s) + return s, err +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + gophercloud.Result +} + +// GetResult is the response of a Get operations. +type GetResult struct { + gophercloud.Result +} + +// UpdateResult is the response of a Update operations. +type UpdateResult struct { + gophercloud.ErrResult +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. 
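+//
+// Illustrative use only (client, queueName and claimID are assumed placeholders):
+//
+//	err := claims.Delete(client, queueName, claimID).ExtractErr()
+//	if err != nil {
+//		// handle error
+//	}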
+type DeleteResult struct { + gophercloud.ErrResult +} + +type Messages struct { + Age float32 `json:"age"` + Href string `json:"href"` + TTL int `json:"ttl"` + Body map[string]interface{} `json:"body"` +} + +type Claim struct { + Age float32 `json:"age"` + Href string `json:"href"` + Messages []Messages `json:"messages"` + TTL int `json:"ttl"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/doc.go new file mode 100644 index 000000000000..787309df54ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/doc.go @@ -0,0 +1,2 @@ +// Claims unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/fixtures.go new file mode 100644 index 000000000000..95ef1468a6b1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/fixtures.go @@ -0,0 +1,133 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/messaging/v2/claims" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// QueueName is the name of the queue +var QueueName = "FakeTestQueue" + +var ClaimID = "51db7067821e727dc24df754" + +// CreateClaimResponse is a sample response to a create claim +const CreateClaimResponse = ` +{ + "messages": [ + { + "body": {"event": "BackupStarted"}, + "href": "/v2/queues/FakeTestQueue/messages/51db6f78c508f17ddc924357?claim_id=51db7067821e727dc24df754", + "age": 57, + "ttl": 300 + } + ] +}` + +// GetClaimResponse is a sample response to a get claim +const GetClaimResponse = ` +{ + "age": 50, + "href": "/v2/queues/demoqueue/claims/51db7067821e727dc24df754", + "messages": [ + { + "body": {"event": "BackupStarted"}, + "href": "/v2/queues/FakeTestQueue/messages/51db6f78c508f17ddc924357?claim_id=51db7067821e727dc24df754", + "age": 57, + "ttl": 300 + } + ], + "ttl": 50 +}` + +// CreateClaimRequest is a sample request to create a claim. +const CreateClaimRequest = ` +{ + "ttl": 3600, + "grace": 3600 +}` + +// UpdateClaimRequest is a sample request to update a claim. +const UpdateClaimRequest = ` +{ + "ttl": 1200, + "grace": 1600 +}` + +// CreatedClaim is the result of a create request. +var CreatedClaim = []claims.Messages{ + { + Age: 57, + Href: fmt.Sprintf("/v2/queues/%s/messages/51db6f78c508f17ddc924357?claim_id=%s", QueueName, ClaimID), + TTL: 300, + Body: map[string]interface{}{"event": "BackupStarted"}, + }, +} + +// FirstClaim is the result of a get claim. +var FirstClaim = claims.Claim{ + Age: 50, + Href: "/v2/queues/demoqueue/claims/51db7067821e727dc24df754", + Messages: []claims.Messages{ + { + Age: 57, + Href: fmt.Sprintf("/v2/queues/%s/messages/51db6f78c508f17ddc924357?claim_id=%s", QueueName, ClaimID), + TTL: 300, + Body: map[string]interface{}{"event": "BackupStarted"}, + }, + }, + TTL: 50, +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request. 
+func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/claims", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreateClaimRequest) + + w.WriteHeader(http.StatusCreated) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateClaimResponse) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/claims/%s", QueueName, ClaimID), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetClaimResponse) + }) +} + +// HandleUpdateSuccessfully configures the test server to respond to a Update request. +func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/claims/%s", QueueName, ClaimID), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, UpdateClaimRequest) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to an Delete request. +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/claims/%s", QueueName, ClaimID), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/requests_test.go new file mode 100644 index 000000000000..e3ee3d39a0af --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/testing/requests_test.go @@ -0,0 +1,58 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/messaging/v2/claims" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + createOpts := claims.CreateOpts{ + TTL: 3600, + Grace: 3600, + Limit: 10, + } + + actual, err := claims.Create(fake.ServiceClient(), QueueName, createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, CreatedClaim, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := claims.Get(fake.ServiceClient(), QueueName, ClaimID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &FirstClaim, actual) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + updateOpts := claims.UpdateOpts{ + Grace: 1600, + TTL: 1200, + } + + err := claims.Update(fake.ServiceClient(), QueueName, ClaimID, updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := claims.Delete(fake.ServiceClient(), QueueName, ClaimID).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/urls.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/urls.go new file mode 100644 index 000000000000..fae8fb4f45ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/claims/urls.go @@ -0,0 +1,24 @@ +package claims + +import "github.com/gophercloud/gophercloud" + +const ( + apiVersion = "v2" + apiName = "queues" +) + +func createURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "claims") +} + +func getURL(client *gophercloud.ServiceClient, queueName string, claimID string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "claims", claimID) +} + +func updateURL(client *gophercloud.ServiceClient, queueName string, claimID string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "claims", claimID) +} + +func deleteURL(client *gophercloud.ServiceClient, queueName string, claimID string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "claims", claimID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/doc.go new file mode 100644 index 000000000000..e5f5bce60fd8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/doc.go @@ -0,0 +1,121 @@ +/* +Package messages provides information and interaction with the messages through +the OpenStack Messaging(Zaqar) service. + +Example to List Messages + + listOpts := messages.ListOpts{ + Limit: 10, + } + + queueName := "my_queue" + + pager := messages.List(client, queueName, listOpts) + + err = pager.EachPage(func(page pagination.Page) (bool, error) { + allMessages, err := queues.ExtractQueues(page) + if err != nil { + panic(err) + } + + for _, message := range allMessages { + fmt.Printf("%+v\n", message) + } + + return true, nil + }) + +Example to Create Messages + + queueName = "my_queue" + + createOpts := messages.CreateOpts{ + Messages: []messages.Messages{ + { + TTL: 300, + Delay: 20, + Body: map[string]interface{}{ + "event": "BackupStarted", + "backup_id": "c378813c-3f0b-11e2-ad92-7823d2b0f3ce", + }, + }, + { + Body: map[string]interface{}{ + "event": "BackupProgress", + "current_bytes": "0", + "total_bytes": "99614720", + }, + }, + }, + } + + resources, err := messages.Create(client, queueName, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Get a set of Messages + + queueName := "my_queue" + + getMessageOpts := messages.GetMessagesOpts{ + IDs: "123456", + } + + messagesList, err := messages.GetMessages(client, createdQueueName, getMessageOpts).Extract() + if err != nil { + panic(err) + } + +Example to get a singular Message + + queueName := "my_queue" + messageID := "123456" + + message, err := messages.Get(client, queueName, messageID).Extract() + if err != nil { + panic(err) + } + +Example to Delete a set of Messages + + queueName := "my_queue" + + deleteMessagesOpts := messages.DeleteMessagesOpts{ + IDs: []string{"9988776655"}, + } + + err := messages.DeleteMessages(client, queueName, deleteMessagesOpts).ExtractErr() + if err != nil { + panic(err) + } + +Example to Pop a set of Messages + + queueName := "my_queue" + + popMessagesOpts := messages.PopMessagesOpts{ + Pop: 5, + } + + resources, err := messages.PopMessages(client, queueName, popMessagesOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a singular Message + + clientID := 
"3381af92-2b9e-11e3-b191-71861300734d" + queueName := "my_queue" + messageID := "123456" + + deleteOpts := messages.DeleteOpts{ + ClaimID: "12345", + } + + err := messages.Delete(client), queueName, messageID, deleteOpts).ExtractErr() + if err != nil { + panic(err) + } +*/ +package messages diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/requests.go new file mode 100644 index 000000000000..6aac4ac35c15 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/requests.go @@ -0,0 +1,253 @@ +package messages + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToMessageListQuery() (string, error) +} + +// ListOpts params to be used with List. +type ListOpts struct { + // Limit instructs List to refrain from sending excessively large lists of queues + Limit int `q:"limit,omitempty"` + + // Marker and Limit control paging. Marker instructs List where to start listing from. + Marker string `q:"marker,omitempty"` + + // Indicate if the messages can be echoed back to the client that posted them. + Echo bool `q:"echo,omitempty"` + + // Indicate if the messages list should include the claimed messages. + IncludeClaimed bool `q:"include_claimed,omitempty"` + + //Indicate if the messages list should include the delayed messages. + IncludeDelayed bool `q:"include_delayed,omitempty"` +} + +func (opts ListOpts) ToMessageListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListMessages lists messages on a specific queue based off queue name. +func List(client *gophercloud.ServiceClient, queueName string, opts ListOptsBuilder) pagination.Pager { + url := listURL(client, queueName) + if opts != nil { + query, err := opts.ToMessageListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + pager := pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return MessagePage{pagination.LinkedPageBase{PageResult: r}} + }) + return pager +} + +// CreateOptsBuilder Builder. +type CreateOptsBuilder interface { + ToMessageCreateMap() (map[string]interface{}, error) +} + +// BatchCreateOpts is an array of CreateOpts. +type BatchCreateOpts []CreateOpts + +// CreateOpts params to be used with Create. +type CreateOpts struct { + // TTL specifies how long the server waits before marking the message + // as expired and removing it from the queue. + TTL int `json:"ttl,omitempty"` + + // Delay specifies how long the message can be claimed. + Delay int `json:"delay,omitempty"` + + // Body specifies an arbitrary document that constitutes the body of the message being sent. + Body map[string]interface{} `json:"body" required:"true"` +} + +// ToMessageCreateMap constructs a request body from BatchCreateOpts. +func (opts BatchCreateOpts) ToMessageCreateMap() (map[string]interface{}, error) { + messages := make([]map[string]interface{}, len(opts)) + for i, message := range opts { + messageMap, err := message.ToMap() + if err != nil { + return nil, err + } + messages[i] = messageMap + } + return map[string]interface{}{"messages": messages}, nil +} + +// ToMap constructs a request body from UpdateOpts. 
+func (opts CreateOpts) ToMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Create creates a message on a specific queue based of off queue name. +func Create(client *gophercloud.ServiceClient, queueName string, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToMessageCreateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(createURL(client, queueName), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// DeleteMessagesOptsBuilder allows extensions to add additional parameters to the +// DeleteMessages request. +type DeleteMessagesOptsBuilder interface { + ToMessagesDeleteQuery() (string, error) +} + +// DeleteMessagesOpts params to be used with DeleteMessages. +type DeleteMessagesOpts struct { + IDs []string `q:"ids,omitempty"` +} + +// ToMessagesDeleteQuery formats a DeleteMessagesOpts structure into a query string. +func (opts DeleteMessagesOpts) ToMessagesDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// DeleteMessages deletes multiple messages based off of ID. +func DeleteMessages(client *gophercloud.ServiceClient, queueName string, opts DeleteMessagesOptsBuilder) (r DeleteResult) { + url := deleteURL(client, queueName) + if opts != nil { + query, err := opts.ToMessagesDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + _, r.Err = client.Delete(url, &gophercloud.RequestOpts{ + OkCodes: []int{200, 204}, + }) + return +} + +// PopMessagesOptsBuilder allows extensions to add additional parameters to the +// DeleteMessages request. +type PopMessagesOptsBuilder interface { + ToMessagesPopQuery() (string, error) +} + +// PopMessagesOpts params to be used with PopMessages. +type PopMessagesOpts struct { + Pop int `q:"pop,omitempty"` +} + +// ToMessagesPopQuery formats a PopMessagesOpts structure into a query string. +func (opts PopMessagesOpts) ToMessagesPopQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// PopMessages deletes and returns multiple messages based off of number of messages. +func PopMessages(client *gophercloud.ServiceClient, queueName string, opts PopMessagesOptsBuilder) (r PopResult) { + url := deleteURL(client, queueName) + if opts != nil { + query, err := opts.ToMessagesPopQuery() + if err != nil { + r.Err = err + return + } + url += query + } + result, err := client.Delete(url, &gophercloud.RequestOpts{ + OkCodes: []int{200, 204}, + }) + r.Body = result.Body + r.Header = result.Header + r.Err = err + return +} + +// GetMessagesOptsBuilder allows extensions to add additional parameters to the +// GetMessages request. +type GetMessagesOptsBuilder interface { + ToGetMessagesListQuery() (string, error) +} + +// GetMessagesOpts params to be used with GetMessages. +type GetMessagesOpts struct { + IDs []string `q:"ids,omitempty"` +} + +// ToGetMessagesListQuery formats a GetMessagesOpts structure into a query string. +func (opts GetMessagesOpts) ToGetMessagesListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// GetMessages requests details on a multiple messages, by IDs. 
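+//
+// Illustrative use only (client is an assumed placeholder); note that IDs is a
+// slice of message IDs:
+//
+//	opts := messages.GetMessagesOpts{IDs: []string{"578f0055508f153f256f717f"}}
+//	msgs, err := messages.GetMessages(client, "my_queue", opts).Extract()
+//	if err != nil {
+//		// handle error
+//	}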
+func GetMessages(client *gophercloud.ServiceClient, queueName string, opts GetMessagesOptsBuilder) (r GetMessagesResult) { + url := getURL(client, queueName) + if opts != nil { + query, err := opts.ToGetMessagesListQuery() + if err != nil { + r.Err = err + return + } + url += query + } + _, r.Err = client.Get(url, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get requests details on a single message, by ID. +func Get(client *gophercloud.ServiceClient, queueName string, messageID string) (r GetResult) { + _, r.Err = client.Get(messageURL(client, queueName, messageID), &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// delete request. +type DeleteOptsBuilder interface { + ToMessageDeleteQuery() (string, error) +} + +// DeleteOpts params to be used with Delete. +type DeleteOpts struct { + // ClaimID instructs Delete to delete a message that is associated with a claim ID + ClaimID string `q:"claim_id,omitempty"` +} + +// ToMessageDeleteQuery formats a DeleteOpts structure into a query string. +func (opts DeleteOpts) ToMessageDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete deletes a specific message from the queue. +func Delete(client *gophercloud.ServiceClient, queueName string, messageID string, opts DeleteOptsBuilder) (r DeleteResult) { + url := DeleteMessageURL(client, queueName, messageID) + if opts != nil { + query, err := opts.ToMessageDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + _, r.Err = client.Delete(url, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/results.go new file mode 100644 index 000000000000..1c361e3f6dcb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/results.go @@ -0,0 +1,132 @@ +package messages + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// commonResult is the response of a base result. +type commonResult struct { + gophercloud.Result +} + +// CreateResult is the response of a Create operations. +type CreateResult struct { + gophercloud.Result +} + +// MessagePage contains a single page of all clusters from a ListDetails call. +type MessagePage struct { + pagination.LinkedPageBase +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult is the response of a Create operations. +type PopResult struct { + gophercloud.Result +} + +// GetMessagesResult is the response of a GetMessages operations. +type GetMessagesResult struct { + gophercloud.Result +} + +// GetResult is the response of a Get operations. +type GetResult struct { + gophercloud.Result +} + +// Message represents a message on a queue. +type Message struct { + Body map[string]interface{} `json:"body"` + Age int `json:"age"` + Href string `json:"href"` + ID string `json:"id"` + TTL int `json:"ttl"` + Checksum string `json:"checksum"` +} + +// PopMessage represents a message returned from PopMessages. 
+type PopMessage struct { + Body map[string]interface{} `json:"body"` + Age int `json:"age"` + ID string `json:"id"` + TTL int `json:"ttl"` + ClaimCount int `json:"claim_count"` + ClaimID string `json:"claim_id"` +} + +// ResourceList represents the result of creating a message. +type ResourceList struct { + Resources []string `json:"resources"` +} + +// Extract interprets any CreateResult as a ResourceList. +func (r CreateResult) Extract() (ResourceList, error) { + var s ResourceList + err := r.ExtractInto(&s) + return s, err +} + +// Extract interprets any PopResult as a list of PopMessage. +func (r PopResult) Extract() ([]PopMessage, error) { + var s struct { + PopMessages []PopMessage `json:"messages"` + } + err := r.ExtractInto(&s) + return s.PopMessages, err +} + +// Extract interprets any GetMessagesResult as a list of Message. +func (r GetMessagesResult) Extract() ([]Message, error) { + var s struct { + Messages []Message `json:"messages"` + } + err := r.ExtractInto(&s) + return s.Messages, err +} + +// Extract interprets any GetResult as a Message. +func (r GetResult) Extract() (Message, error) { + var s Message + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMessage extracts message into a list of Message. +func ExtractMessages(r pagination.Page) ([]Message, error) { + var s struct { + Messages []Message `json:"messages"` + } + err := (r.(MessagePage)).ExtractInto(&s) + return s.Messages, err +} + +// IsEmpty determines if a MessagePage contains any results. +func (r MessagePage) IsEmpty() (bool, error) { + s, err := ExtractMessages(r) + return len(s) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r MessagePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + next, err := gophercloud.ExtractNextURL(s.Links) + if err != nil { + return "", err + } + return nextPageURL(r.URL.String(), next) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/doc.go new file mode 100644 index 000000000000..05931e02352b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/doc.go @@ -0,0 +1,2 @@ +// messages unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/fixtures.go new file mode 100644 index 000000000000..000f887ffa6a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/fixtures.go @@ -0,0 +1,316 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/messaging/v2/messages" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// QueueName is the name of the queue +var QueueName = "FakeTestQueue" + +// MessageID is the id of the message +var MessageID = "9988776655" + +// CreateMessageResponse is a sample response to a Create message. 
+const CreateMessageResponse = ` +{ + "resources": [ + "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357", + "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924358" + ] +}` + +// CreateMessageRequest is a sample request to create a message. +const CreateMessageRequest = ` +{ + "messages": [ + { + "body": { + "backup_id": "c378813c-3f0b-11e2-ad92-7823d2b0f3ce", + "event": "BackupStarted" + }, + "delay": 20, + "ttl": 300 + }, + { + "body": { + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720" + } + } + ] +}` + +// ListMessagesResponse is a sample response to list messages. +const ListMessagesResponse1 = ` +{ + "messages": [ + { + "body": { + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720" + }, + "age": 482, + "href": "/v2/queues/FakeTestQueue/messages/578edfe6508f153f256f717b", + "id": "578edfe6508f153f256f717b", + "ttl": 3600, + "checksum": "MD5:abf7213555626e29c3cb3e5dc58b3515" + } + ], + "links": [ + { + "href": "/v2/queues/FakeTestQueue/messages?marker=1", + "rel": "next" + } + ] +}` + +// ListMessagesResponse is a sample response to list messages. +const ListMessagesResponse2 = ` +{ + "messages": [ + { + "body": { + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720" + }, + "age": 456, + "href": "/v2/queues/FakeTestQueue/messages/578ee000508f153f256f717d", + "id": "578ee000508f153f256f717d", + "ttl": 3600, + "checksum": "MD5:abf7213555626e29c3cb3e5dc58b3515" + } + ], + "links": [ + { + "href": "/v2/queues/FakeTestQueue/messages?marker=2", + "rel": "next" + } + ] + +}` + +// GetMessagesResponse is a sample response to GetMessages. +const GetMessagesResponse = ` +{ + "messages": [ + { + "body": { + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720" + }, + "age": 443, + "href": "/v2/queues/beijing/messages/578f0055508f153f256f717f", + "id": "578f0055508f153f256f717f", + "ttl": 3600 + } + ] +}` + +// GetMessageResponse is a sample response to Get. +const GetMessageResponse = ` +{ + "body": { + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720" + }, + "age": 482, + "href": "/v2/queues/FakeTestQueue/messages/578edfe6508f153f256f717b", + "id": "578edfe6508f153f256f717b", + "ttl": 3600, + "checksum": "MD5:abf7213555626e29c3cb3e5dc58b3515" +}` + +// PopMessageResponse is a sample reponse to pop messages +const PopMessageResponse = ` +{ + "messages": [ + { + "body": { + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720" + }, + "age": 20, + "ttl": 120, + "claim_count": 55, + "claim_id": "123456", + "id": "5ae7972599352b436763aee7" + } + ] +}` + +// ExpectedResources is the expected result in Create +var ExpectedResources = messages.ResourceList{ + Resources: []string{ + "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924357", + "/v2/queues/demoqueue/messages/51db6f78c508f17ddc924358", + }, +} + +// FirstMessage is the first result in a List. +var FirstMessage = messages.Message{ + Body: map[string]interface{}{ + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720", + }, + Age: 482, + Href: fmt.Sprintf("/v2/queues/%s/messages/578edfe6508f153f256f717b", QueueName), + ID: "578edfe6508f153f256f717b", + TTL: 3600, + Checksum: "MD5:abf7213555626e29c3cb3e5dc58b3515", +} + +// SecondMessage is the second result in a List. 
+var SecondMessage = messages.Message{ + Body: map[string]interface{}{ + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720", + }, + Age: 456, + Href: fmt.Sprintf("/v2/queues/%s/messages/578ee000508f153f256f717d", QueueName), + ID: "578ee000508f153f256f717d", + TTL: 3600, + Checksum: "MD5:abf7213555626e29c3cb3e5dc58b3515", +} + +// ExpectedMessagesSlice is the expected result in a List. +var ExpectedMessagesSlice = [][]messages.Message{{FirstMessage}, {SecondMessage}} + +// ExpectedMessagesSet is the expected result in GetMessages +var ExpectedMessagesSet = []messages.Message{ + { + Body: map[string]interface{}{ + "total_bytes": "99614720", + "current_bytes": "0", + "event": "BackupProgress", + }, + Age: 443, + Href: "/v2/queues/beijing/messages/578f0055508f153f256f717f", + ID: "578f0055508f153f256f717f", + TTL: 3600, + Checksum: "", + }, +} + +// ExpectedPopMessage is the expected result of a Pop. +var ExpectedPopMessage = []messages.PopMessage{{ + Body: map[string]interface{}{ + "current_bytes": "0", + "event": "BackupProgress", + "total_bytes": "99614720", + }, + Age: 20, + TTL: 120, + ClaimID: "123456", + ClaimCount: 55, + ID: "5ae7972599352b436763aee7", +}} + +// HandleCreateSuccessfully configures the test server to respond to a Create request. +func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreateMessageRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, CreateMessageResponse) + }) +} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + next := r.RequestURI + + switch next { + case fmt.Sprintf("/v2/queues/%s/messages?limit=1", QueueName): + fmt.Fprintf(w, ListMessagesResponse1) + case fmt.Sprintf("/v2/queues/%s/messages?marker=1", QueueName): + fmt.Fprint(w, ListMessagesResponse2) + case fmt.Sprintf("/v2/queues/%s/messages?marker=2", QueueName): + fmt.Fprint(w, `{ "messages": [] }`) + } + }) +} + +// HandleGetMessagesSuccessfully configures the test server to respond to a GetMessages request. +func HandleGetMessagesSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetMessagesResponse) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages/%s", QueueName, MessageID), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetMessageResponse) + }) +} + +// HandleDeleteMessagesSuccessfully configures the test server to respond to a Delete request. 
+func HandleDeleteMessagesSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandlePopSuccessfully configures the test server to respond to a Pop request. +func HandlePopSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, PopMessageResponse) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request. +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/messages/%s", QueueName, MessageID), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/requests_test.go new file mode 100644 index 000000000000..eb839262b9a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/testing/requests_test.go @@ -0,0 +1,126 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/messaging/v2/messages" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + listOpts := messages.ListOpts{ + Limit: 1, + } + + count := 0 + err := messages.List(fake.ServiceClient(), QueueName, listOpts).EachPage(func(page pagination.Page) (bool, error) { + actual, err := messages.ExtractMessages(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedMessagesSlice[count], actual) + count++ + + return true, nil + }) + th.AssertNoErr(t, err) + + th.CheckEquals(t, 2, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + createOpts := messages.BatchCreateOpts{ + messages.CreateOpts{ + TTL: 300, + Delay: 20, + Body: map[string]interface{}{ + "event": "BackupStarted", + "backup_id": "c378813c-3f0b-11e2-ad92-7823d2b0f3ce", + }, + }, + messages.CreateOpts{ + Body: map[string]interface{}{ + "event": "BackupProgress", + "current_bytes": "0", + "total_bytes": "99614720", + }, + }, + } + + actual, err := messages.Create(fake.ServiceClient(), QueueName, createOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedResources, actual) +} + +func TestGetMessages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetMessagesSuccessfully(t) + + getMessagesOpts := messages.GetMessagesOpts{ + IDs: []string{"9988776655"}, + } + + actual, err := messages.GetMessages(fake.ServiceClient(), QueueName, getMessagesOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedMessagesSet, actual) +} + +func TestGetMessage(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := messages.Get(fake.ServiceClient(), QueueName, MessageID).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, FirstMessage, actual) +} + +func TestDeleteMessages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteMessagesSuccessfully(t) + + deleteMessagesOpts := messages.DeleteMessagesOpts{ + IDs: []string{"9988776655"}, + } + + err := messages.DeleteMessages(fake.ServiceClient(), QueueName, deleteMessagesOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestPopMessages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePopSuccessfully(t) + + popMessagesOpts := messages.PopMessagesOpts{ + Pop: 1, + } + + actual, err := messages.PopMessages(fake.ServiceClient(), QueueName, popMessagesOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedPopMessage, actual) +} + +func TestDeleteMessage(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + deleteOpts := messages.DeleteOpts{ + ClaimID: "12345", + } + + err := messages.Delete(fake.ServiceClient(), QueueName, MessageID, deleteOpts).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/urls.go new file mode 100644 index 000000000000..a00dd5620d25 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/messages/urls.go @@ -0,0 +1,49 @@ +package messages + +import ( + "net/url" + + "github.com/gophercloud/gophercloud" +) + +const ( + apiVersion = "v2" + apiName = "queues" +) + +func createURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "messages") +} + +func listURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "messages") +} + +func getURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "messages") +} + +func deleteURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "messages") +} + +func DeleteMessageURL(client *gophercloud.ServiceClient, queueName string, messageID string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "messages", messageID) +} + +func messageURL(client *gophercloud.ServiceClient, queueName string, messageID string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "messages", messageID) +} + +// Builds next page full url based on current url. +func nextPageURL(currentURL string, next string) (string, error) { + base, err := url.Parse(currentURL) + if err != nil { + return "", err + } + rel, err := url.Parse(next) + if err != nil { + return "", err + } + return base.ResolveReference(rel).String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/doc.go new file mode 100644 index 000000000000..ca97c52a8a34 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/doc.go @@ -0,0 +1,111 @@ +/* +Package queues provides information and interaction with the queues through +the OpenStack Messaging (Zaqar) service. 
+
+It supports listing, creating, showing, updating, and deleting queues, as well
+as queue actions such as getting message statistics, sharing, and purging.
+
+Example to List Queues
+
+	listOpts := queues.ListOpts{
+		Limit: 10,
+	}
+
+	pager := queues.List(client, listOpts)
+
+	err = pager.EachPage(func(page pagination.Page) (bool, error) {
+		queues, err := queues.ExtractQueues(page)
+		if err != nil {
+			panic(err)
+		}
+
+		for _, queue := range queues {
+			fmt.Printf("%+v\n", queue)
+		}
+
+		return true, nil
+	})
+
+Example to Create a Queue
+
+	createOpts := queues.CreateOpts{
+		QueueName:                  "My_Queue",
+		MaxMessagesPostSize:        262143,
+		DefaultMessageTTL:          3700,
+		DefaultMessageDelay:        25,
+		DeadLetterQueueMessagesTTL: 3500,
+		MaxClaimCount:              10,
+		Extra:                      map[string]interface{}{"description": "Test queue."},
+	}
+
+	err := queues.Create(client, createOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Queue
+
+	updateOpts := queues.BatchUpdateOpts{
+		queues.UpdateOpts{
+			Op:    "replace",
+			Path:  "/metadata/_max_claim_count",
+			Value: 15,
+		},
+		queues.UpdateOpts{
+			Op:    "replace",
+			Path:  "/metadata/description",
+			Value: "Updated description test queue.",
+		},
+	}
+
+	updateResult, err := queues.Update(client, queueName, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get a Queue
+
+	queue, err := queues.Get(client, queueName).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Queue
+
+	err := queues.Delete(client, queueName).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get Message Stats of a Queue
+
+	queueStats, err := queues.GetStats(client, queueName).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Share a Queue
+
+	shareOpts := queues.ShareOpts{
+		Paths:   []queues.SharePath{queues.PathMessages},
+		Methods: []queues.ShareMethod{queues.MethodGet},
+	}
+
+	queueShare, err := queues.Share(client, queueName, shareOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Purge a Queue
+
+	purgeOpts := queues.PurgeOpts{
+		ResourceTypes: []queues.PurgeResource{
+			queues.ResourceMessages,
+		},
+	}
+
+	err := queues.Purge(client, queueName, purgeOpts).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package queues
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/requests.go
new file mode 100644
index 000000000000..6ef53947d7fe
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/requests.go
@@ -0,0 +1,292 @@
+package queues
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToQueueListQuery() (string, error)
+}
+
+// ListOpts contains the parameters to be used with List.
+type ListOpts struct {
+	// Limit instructs List to refrain from sending excessively large lists of
+	// queues.
+	Limit int `q:"limit,omitempty"`
+
+	// Marker and Limit control paging. Marker instructs List where to start listing from.
+	Marker string `q:"marker,omitempty"`
+
+	// Detailed specifies whether to show detailed information when querying queues.
+	Detailed bool `q:"detailed,omitempty"`
+}
+
+// ToQueueListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToQueueListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List instructs OpenStack to provide a list of queues.
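+// For illustration (values taken from the paging fixtures in the testing
+// package): ListOpts{Limit: 1} produces the query string "?limit=1" and
+// ListOpts{Marker: "london"} produces "?marker=london"; a nil ListOptsBuilder
+// lists queues with no query parameters.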
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager {
+	url := listURL(client)
+	if opts != nil {
+		query, err := opts.ToQueueListQuery()
+		if err != nil {
+			return pagination.Pager{Err: err}
+		}
+		url += query
+	}
+
+	pager := pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+		return QueuePage{pagination.LinkedPageBase{PageResult: r}}
+	})
+	return pager
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToQueueCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts specifies the queue creation parameters.
+type CreateOpts struct {
+	// The name of the queue to create.
+	QueueName string `json:"queue_name" required:"true"`
+
+	// The target queue that messages will be moved to when a message cannot
+	// be processed successfully after the max claim count is met.
+	DeadLetterQueue string `json:"_dead_letter_queue,omitempty"`
+
+	// The new TTL for messages that are moved to the dead letter queue.
+	DeadLetterQueueMessagesTTL int `json:"_dead_letter_queue_messages_ttl,omitempty"`
+
+	// The default delay applied to messages posted to the queue. A delayed
+	// message cannot be claimed until the delay expires.
+	DefaultMessageDelay int `json:"_default_message_delay,omitempty"`
+
+	// The default TTL of messages defined for the queue, which applies to any
+	// message posted to it.
+	DefaultMessageTTL int `json:"_default_message_ttl" required:"true"`
+
+	// The flavor name, which tells Zaqar which storage pool to use when
+	// creating the queue.
+	Flavor string `json:"_flavor,omitempty"`
+
+	// The maximum number of times a message can be claimed.
+	MaxClaimCount int `json:"_max_claim_count,omitempty"`
+
+	// The maximum post size of messages defined for the queue, which applies
+	// to any message posted to it.
+	MaxMessagesPostSize int `json:"_max_messages_post_size,omitempty"`
+
+	// Extra is free-form key/value pairs used to describe the queue.
+	Extra map[string]interface{} `json:"-"`
+}
+
+// ToQueueCreateMap constructs a request body from CreateOpts.
+func (opts CreateOpts) ToQueueCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+
+	if opts.Extra != nil {
+		for key, value := range opts.Extra {
+			b[key] = value
+		}
+	}
+	return b, nil
+}
+
+// Create requests the creation of a new queue.
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToQueueCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+
+	queueName := b["queue_name"].(string)
+	delete(b, "queue_name")
+
+	_, r.Err = client.Put(createURL(client, queueName), b, r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{201, 204},
+	})
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// update request.
+type UpdateOptsBuilder interface {
+	ToQueueUpdateMap() ([]map[string]interface{}, error)
+}
+
+// BatchUpdateOpts is an array of UpdateOpts.
+type BatchUpdateOpts []UpdateOpts
+
+// UpdateOpts is the struct responsible for updating a property of a queue.
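+// Each UpdateOpts entry describes one JSON patch-style operation, for example
+// (an illustrative value, mirroring the package documentation above):
+//
+//	UpdateOpts{Op: ReplaceOp, Path: "/metadata/description", Value: "Updated description test queue."}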
+type UpdateOpts struct {
+	Op    UpdateOp    `json:"op" required:"true"`
+	Path  string      `json:"path" required:"true"`
+	Value interface{} `json:"value" required:"true"`
+}
+
+// UpdateOp represents a valid update operation.
+type UpdateOp string
+
+const (
+	ReplaceOp UpdateOp = "replace"
+	AddOp     UpdateOp = "add"
+	RemoveOp  UpdateOp = "remove"
+)
+
+// ToQueueUpdateMap constructs a request body from BatchUpdateOpts.
+func (opts BatchUpdateOpts) ToQueueUpdateMap() ([]map[string]interface{}, error) {
+	queuesUpdates := make([]map[string]interface{}, len(opts))
+	for i, queue := range opts {
+		queueMap, err := queue.ToMap()
+		if err != nil {
+			return nil, err
+		}
+		queuesUpdates[i] = queueMap
+	}
+	return queuesUpdates, nil
+}
+
+// ToMap constructs a request body from UpdateOpts.
+func (opts UpdateOpts) ToMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// Update updates the specified queue.
+func Update(client *gophercloud.ServiceClient, queueName string, opts UpdateOptsBuilder) (r UpdateResult) {
+	_, r.Err = client.Patch(updateURL(client, queueName), opts, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 204},
+		MoreHeaders: map[string]string{
+			"Content-Type": "application/openstack-messaging-v2.0-json-patch"},
+	})
+	return
+}
+
+// Get requests details on a single queue, by name.
+func Get(client *gophercloud.ServiceClient, queueName string) (r GetResult) {
+	_, r.Err = client.Get(getURL(client, queueName), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete deletes the specified queue.
+func Delete(client *gophercloud.ServiceClient, queueName string) (r DeleteResult) {
+	_, r.Err = client.Delete(deleteURL(client, queueName), &gophercloud.RequestOpts{
+		OkCodes: []int{204},
+	})
+	return
+}
+
+// GetStats returns statistics for the specified queue.
+func GetStats(client *gophercloud.ServiceClient, queueName string) (r StatResult) {
+	_, r.Err = client.Get(statURL(client, queueName), &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200}})
+	return
+}
+
+// SharePath represents a valid share path.
+type SharePath string
+
+const (
+	PathMessages      SharePath = "messages"
+	PathClaims        SharePath = "claims"
+	PathSubscriptions SharePath = "subscriptions"
+)
+
+// ShareMethod represents a valid share HTTP method.
+type ShareMethod string
+
+const (
+	MethodGet   ShareMethod = "GET"
+	MethodPatch ShareMethod = "PATCH"
+	MethodPost  ShareMethod = "POST"
+	MethodPut   ShareMethod = "PUT"
+)
+
+// ShareOpts specifies share creation parameters.
+type ShareOpts struct {
+	Paths   []SharePath   `json:"paths,omitempty"`
+	Methods []ShareMethod `json:"methods,omitempty"`
+	Expires string        `json:"expires,omitempty"`
+}
+
+// ShareOptsBuilder allows extensions to add additional attributes to the
+// Share request.
+type ShareOptsBuilder interface {
+	ToQueueShareMap() (map[string]interface{}, error)
+}
+
+// ToQueueShareMap formats a ShareOpts structure into a request body.
+func (opts ShareOpts) ToQueueShareMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "")
+	if err != nil {
+		return nil, err
+	}
+	return b, nil
+}
+
+// Share creates a pre-signed URL for a given queue.
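+// A minimal usage sketch (illustrative only; "client" is assumed to be an
+// authenticated Messaging service client and the queue name is arbitrary):
+//
+//	shareOpts := ShareOpts{
+//		Paths:   []SharePath{PathMessages},
+//		Methods: []ShareMethod{MethodGet},
+//	}
+//	queueShare, err := Share(client, "FakeTestQueue", shareOpts).Extract()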
+func Share(client *gophercloud.ServiceClient, queueName string, opts ShareOptsBuilder) (r ShareResult) { + b, err := opts.ToQueueShareMap() + if err != nil { + r.Err = err + return r + } + _, r.Err = client.Post(shareURL(client, queueName), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +type PurgeResource string + +const ( + ResourceMessages PurgeResource = "messages" + ResourceSubscriptions PurgeResource = "subscriptions" +) + +// PurgeOpts specifies the purge parameters. +type PurgeOpts struct { + ResourceTypes []PurgeResource `json:"resource_types" required:"true"` +} + +// PurgeOptsBuilder allows extensions to add additional attributes to the +// Purge request. +type PurgeOptsBuilder interface { + ToQueuePurgeMap() (map[string]interface{}, error) +} + +// ToPurgeQueueMap formats a PurgeOpts structure into a request body +func (opts PurgeOpts) ToQueuePurgeMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + return b, nil +} + +// Purge purges particular resource of the queue. +func Purge(client *gophercloud.ServiceClient, queueName string, opts PurgeOptsBuilder) (r PurgeResult) { + b, err := opts.ToQueuePurgeMap() + if err != nil { + r.Err = err + return r + } + + _, r.Err = client.Post(purgeURL(client, queueName), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{204}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/results.go new file mode 100644 index 000000000000..61005da77854 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/results.go @@ -0,0 +1,201 @@ +package queues + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/internal" + "github.com/gophercloud/gophercloud/pagination" +) + +// commonResult is the response of a base result. +type commonResult struct { + gophercloud.Result +} + +// QueuePage contains a single page of all queues from a List operation. +type QueuePage struct { + pagination.LinkedPageBase +} + +// CreateResult is the response of a Create operation. +type CreateResult struct { + gophercloud.ErrResult +} + +// UpdateResult is the response of a Update operation. +type UpdateResult struct { + commonResult +} + +// GetResult is the response of a Get operation. +type GetResult struct { + commonResult +} + +// StatResult contains the result of a Share operation. +type StatResult struct { + gophercloud.Result +} + +// DeleteResult is the result from a Delete operation. Call its ExtractErr +// method to determine if the call succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ShareResult contains the result of a Share operation. +type ShareResult struct { + gophercloud.Result +} + +// PurgeResult is the response of a Purge operation. +type PurgeResult struct { + gophercloud.ErrResult +} + +// Queue represents a messaging queue. +type Queue struct { + Href string `json:"href"` + Methods []string `json:"methods"` + Name string `json:"name"` + Paths []string `json:"paths"` + ResourceTypes []string `json:"resource_types"` + Metadata QueueDetails `json:"metadata"` +} + +// QueueDetails represents the metadata of a queue. +type QueueDetails struct { + // The queue the message will be moved to when the message can’t + // be processed successfully after the max claim count is met. 
+ DeadLetterQueue string `json:"_dead_letter_queue"` + + // The TTL setting for messages when moved to dead letter queue. + DeadLetterQueueMessageTTL int `json:"_dead_letter_queue_messages_ttl"` + + // The delay of messages defined for the queue. + DefaultMessageDelay int `json:"_default_message_delay"` + + // The default TTL of messages defined for the queue. + DefaultMessageTTL int `json:"_default_message_ttl"` + + // Extra is a collection of miscellaneous key/values. + Extra map[string]interface{} `json:"-"` + + // The max number the message can be claimed from the queue. + MaxClaimCount int `json:"_max_claim_count"` + + // The max post size of messages defined for the queue. + MaxMessagesPostSize int `json:"_max_messages_post_size"` + + // The flavor defined for the queue. + Flavor string `json:"flavor"` +} + +// Stats represents a stats response. +type Stats struct { + // Number of Claimed messages for a queue + Claimed int `json:"claimed"` + + // Total Messages for a queue + Total int `json:"total"` + + // Number of free messages + Free int `json:"free"` +} + +// QueueShare represents a share response. +type QueueShare struct { + Project string `json:"project"` + Paths []string `json:"paths"` + Expires string `json:"expires"` + Methods []string `json:"methods"` + Signature string `json:"signature"` +} + +// Extract interprets any commonResult as a Queue. +func (r commonResult) Extract() (QueueDetails, error) { + var s QueueDetails + err := r.ExtractInto(&s) + return s, err +} + +// Extract interprets any StatResult as a Stats. +func (r StatResult) Extract() (Stats, error) { + var s struct { + Stats Stats `json:"messages"` + } + err := r.ExtractInto(&s) + return s.Stats, err +} + +// Extract interprets any ShareResult as a QueueShare. +func (r ShareResult) Extract() (QueueShare, error) { + var s QueueShare + err := r.ExtractInto(&s) + return s, err +} + +// ExtractQueues interprets the results of a single page from a +// List() call, producing a map of queues. +func ExtractQueues(r pagination.Page) ([]Queue, error) { + var s struct { + Queues []Queue `json:"queues"` + } + err := (r.(QueuePage)).ExtractInto(&s) + return s.Queues, err +} + +// IsEmpty determines if a QueuesPage contains any results. +func (r QueuePage) IsEmpty() (bool, error) { + s, err := ExtractQueues(r) + return len(s) == 0, err +} + +// NextPageURL uses the response's embedded link reference to navigate to the +// next page of results. +func (r QueuePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + next, err := gophercloud.ExtractNextURL(s.Links) + if err != nil { + return "", err + } + return nextPageURL(r.URL.String(), next) +} + +func (r *QueueDetails) UnmarshalJSON(b []byte) error { + type tmp QueueDetails + var s struct { + tmp + Extra map[string]interface{} `json:"extra"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = QueueDetails(s.tmp) + + // Collect other fields and bundle them into Extra + // but only if a field titled "extra" wasn't sent. 
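+	// For illustration: with the GetQueueResponse fixture in the testing
+	// package, the top-level "description" key has no matching struct field,
+	// so it ends up in Extra["description"].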
+ if s.Extra != nil { + r.Extra = s.Extra + } else { + var result interface{} + err := json.Unmarshal(b, &result) + if err != nil { + return err + } + if resultMap, ok := result.(map[string]interface{}); ok { + r.Extra = internal.RemainingKeys(QueueDetails{}, resultMap) + } + } + + return err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/doc.go new file mode 100644 index 000000000000..093700883639 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/doc.go @@ -0,0 +1,2 @@ +// queues unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/fixtures.go new file mode 100644 index 000000000000..50e7bd4c9322 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/fixtures.go @@ -0,0 +1,315 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/messaging/v2/queues" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// QueueName is the name of the queue +var QueueName = "FakeTestQueue" + +// CreateQueueRequest is a sample request to create a queue. +const CreateQueueRequest = ` +{ + "_max_messages_post_size": 262144, + "_default_message_ttl": 3600, + "_default_message_delay": 30, + "_dead_letter_queue": "dead_letter", + "_dead_letter_queue_messages_ttl": 3600, + "_max_claim_count": 10, + "description": "Queue for unit testing." +}` + +// CreateShareRequest is a sample request to a share. +const CreateShareRequest = ` +{ + "paths": ["messages", "claims", "subscriptions"], + "methods": ["GET", "POST", "PUT", "PATCH"], + "expires": "2016-09-01T00:00:00" +}` + +// CreatePurgeRequest is a sample request to a purge. +const CreatePurgeRequest = ` +{ + "resource_types": ["messages", "subscriptions"] +}` + +// ListQueuesResponse1 is a sample response to a List queues. +const ListQueuesResponse1 = ` +{ + "queues":[ + { + "href":"/v2/queues/london", + "name":"london", + "metadata":{ + "_dead_letter_queue":"fake_queue", + "_dead_letter_queue_messages_ttl":3500, + "_default_message_delay":25, + "_default_message_ttl":3700, + "_max_claim_count":10, + "_max_messages_post_size":262143, + "description":"Test queue." + } + } + ], + "links":[ + { + "href":"/v2/queues?marker=london", + "rel":"next" + } + ] +}` + +// ListQueuesResponse2 is a sample response to a List queues. +const ListQueuesResponse2 = ` +{ + "queues":[ + { + "href":"/v2/queues/beijing", + "name":"beijing", + "metadata":{ + "_dead_letter_queue":"fake_queue", + "_dead_letter_queue_messages_ttl":3500, + "_default_message_delay":25, + "_default_message_ttl":3700, + "_max_claim_count":10, + "_max_messages_post_size":262143, + "description":"Test queue." + } + } + ], + "links":[ + { + "href":"/v2/queues?marker=beijing", + "rel":"next" + } + ] +}` + +// UpdateQueueRequest is a sample request to update a queue. +const UpdateQueueRequest = ` +[ + { + "op": "replace", + "path": "/metadata/description", + "value": "Update queue description" + } +]` + +// UpdateQueueResponse is a sample response to a update queue. 
+const UpdateQueueResponse = ` +{ + "description": "Update queue description" +}` + +// GetQueueResponse is a sample response to a get queue. +const GetQueueResponse = ` +{ + "_max_messages_post_size": 262144, + "_default_message_ttl": 3600, + "description": "Queue used for unit testing." +}` + +// GetStatsResponse is a sample response to a stats request. +const GetStatsResponse = ` +{ + "messages":{ + "claimed": 10, + "total": 20, + "free": 10 + } +}` + +// CreateShareResponse is a sample response to a share request. +const CreateShareResponse = ` +{ + "project": "2887aabf368046a3bb0070f1c0413470", + "paths": [ + "/v2/queues/test/messages", + "/v2/queues/test/claims", + "/v2/queues/test/subscriptions" + ], + "expires": "2016-09-01T00:00:00", + "methods": [ + "GET", + "PATCH", + "POST", + "PUT" + ], + "signature": "6a63d63242ebd18c3518871dda6fdcb6273db2672c599bf985469241e9a1c799" +}` + +// FirstQueue is the first result in a List. +var FirstQueue = queues.Queue{ + Href: "/v2/queues/london", + Name: "london", + Metadata: queues.QueueDetails{ + DeadLetterQueue: "fake_queue", + DeadLetterQueueMessageTTL: 3500, + DefaultMessageDelay: 25, + DefaultMessageTTL: 3700, + MaxClaimCount: 10, + MaxMessagesPostSize: 262143, + Extra: map[string]interface{}{"description": "Test queue."}, + }, +} + +// SecondQueue is the second result in a List. +var SecondQueue = queues.Queue{ + Href: "/v2/queues/beijing", + Name: "beijing", + Metadata: queues.QueueDetails{ + DeadLetterQueue: "fake_queue", + DeadLetterQueueMessageTTL: 3500, + DefaultMessageDelay: 25, + DefaultMessageTTL: 3700, + MaxClaimCount: 10, + MaxMessagesPostSize: 262143, + Extra: map[string]interface{}{"description": "Test queue."}, + }, +} + +// ExpectedQueueSlice is the expected result in a List. +var ExpectedQueueSlice = [][]queues.Queue{{FirstQueue}, {SecondQueue}} + +// QueueDetails is the expected result in a Get. +var QueueDetails = queues.QueueDetails{ + DefaultMessageTTL: 3600, + MaxMessagesPostSize: 262144, + Extra: map[string]interface{}{"description": "Queue used for unit testing."}, +} + +// ExpectedStats is the expected result in a GetStats. +var ExpectedStats = queues.Stats{ + Claimed: 10, + Total: 20, + Free: 10, +} + +// ExpectedShare is the expected result in Share. +var ExpectedShare = queues.QueueShare{ + Project: "2887aabf368046a3bb0070f1c0413470", + Paths: []string{ + "/v2/queues/test/messages", + "/v2/queues/test/claims", + "/v2/queues/test/subscriptions", + }, + Expires: "2016-09-01T00:00:00", + Methods: []string{ + "GET", + "PATCH", + "POST", + "PUT", + }, + Signature: "6a63d63242ebd18c3518871dda6fdcb6273db2672c599bf985469241e9a1c799", +} + +// HandleListSuccessfully configures the test server to respond to a List request. +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2/queues", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + next := r.RequestURI + + switch next { + case "/v2/queues?limit=1": + fmt.Fprintf(w, ListQueuesResponse1) + case "/v2/queues?marker=london": + fmt.Fprint(w, ListQueuesResponse2) + case "/v2/queues?marker=beijing": + fmt.Fprint(w, `{ "queues": [] }`) + } + }) +} + +// HandleCreateSuccessfully configures the test server to respond to a Create request. 
+func HandleCreateSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreateQueueRequest) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateSuccessfully configures the test server to respond to an Update request. +func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, UpdateQueueRequest) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, UpdateQueueResponse) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request. +func HandleGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetQueueResponse) + }) +} + +// HandleDeleteSuccessfully configures the test server to respond to a Delete request. +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleGetSuccessfully configures the test server to respond to a Get request. +func HandleGetStatsSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/stats", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, GetStatsResponse) + }) +} + +// HandleShareSuccessfully configures the test server to respond to a Share request. +func HandleShareSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/share", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreateShareRequest) + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, CreateShareResponse) + }) +} + +// HandlePurgeSuccessfully configures the test server to respond to a Purge request. 
+func HandlePurgeSuccessfully(t *testing.T) { + th.Mux.HandleFunc(fmt.Sprintf("/v2/queues/%s/purge", QueueName), + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestJSONRequest(t, r, CreatePurgeRequest) + + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/requests_test.go new file mode 100644 index 000000000000..c4502cdac818 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/testing/requests_test.go @@ -0,0 +1,133 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/messaging/v2/queues" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t) + + listOpts := queues.ListOpts{ + Limit: 1, + } + + count := 0 + err := queues.List(fake.ServiceClient(), listOpts).EachPage(func(page pagination.Page) (bool, error) { + actual, err := queues.ExtractQueues(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedQueueSlice[count], actual) + count++ + + return true, nil + }) + th.AssertNoErr(t, err) + + th.CheckEquals(t, 2, count) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t) + + createOpts := queues.CreateOpts{ + QueueName: QueueName, + MaxMessagesPostSize: 262144, + DefaultMessageTTL: 3600, + DefaultMessageDelay: 30, + DeadLetterQueue: "dead_letter", + DeadLetterQueueMessagesTTL: 3600, + MaxClaimCount: 10, + Extra: map[string]interface{}{"description": "Queue for unit testing."}, + } + + err := queues.Create(fake.ServiceClient(), createOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + updateOpts := queues.BatchUpdateOpts{ + queues.UpdateOpts{ + Op: queues.ReplaceOp, + Path: "/metadata/description", + Value: "Update queue description", + }, + } + updatedQueueResult := queues.QueueDetails{ + Extra: map[string]interface{}{"description": "Update queue description"}, + } + + actual, err := queues.Update(fake.ServiceClient(), QueueName, updateOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, updatedQueueResult, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t) + + actual, err := queues.Get(fake.ServiceClient(), QueueName).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, QueueDetails, actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := queues.Delete(fake.ServiceClient(), QueueName).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestGetStat(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetStatsSuccessfully(t) + + actual, err := queues.GetStats(fake.ServiceClient(), QueueName).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedStats, actual) +} + +func TestShare(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleShareSuccessfully(t) + + shareOpts := queues.ShareOpts{ + Paths: []queues.SharePath{queues.PathMessages, queues.PathClaims, 
queues.PathSubscriptions}, + Methods: []queues.ShareMethod{queues.MethodGet, queues.MethodPost, queues.MethodPut, queues.MethodPatch}, + Expires: "2016-09-01T00:00:00", + } + + actual, err := queues.Share(fake.ServiceClient(), QueueName, shareOpts).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedShare, actual) +} + +func TestPurge(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePurgeSuccessfully(t) + + purgeOpts := queues.PurgeOpts{ + ResourceTypes: []queues.PurgeResource{queues.ResourceMessages, queues.ResourceSubscriptions}, + } + + err := queues.Purge(fake.ServiceClient(), QueueName, purgeOpts).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/urls.go new file mode 100644 index 000000000000..6b5a0e325a55 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/messaging/v2/queues/urls.go @@ -0,0 +1,61 @@ +package queues + +import ( + "net/url" + + "github.com/gophercloud/gophercloud" +) + +const ( + apiVersion = "v2" + apiName = "queues" +) + +func commonURL(client *gophercloud.ServiceClient) string { + return client.ServiceURL(apiVersion, apiName) +} + +func createURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName) +} + +func listURL(client *gophercloud.ServiceClient) string { + return commonURL(client) +} + +func updateURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName) +} + +func getURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName) +} + +func deleteURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName) +} + +func statURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "stats") +} + +func shareURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "share") +} + +func purgeURL(client *gophercloud.ServiceClient, queueName string) string { + return client.ServiceURL(apiVersion, apiName, queueName, "purge") +} + +// builds next page full url based on current url +func nextPageURL(currentURL string, next string) (string, error) { + base, err := url.Parse(currentURL) + if err != nil { + return "", err + } + rel, err := url.Parse(next) + if err != nil { + return "", err + } + return base.ResolveReference(rel).String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/doc.go new file mode 100644 index 000000000000..5461942b9209 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/doc.go @@ -0,0 +1,22 @@ +/* +Package apiversions provides information and interaction with the different +API versions for the OpenStack Neutron service. This functionality is not +restricted to this particular version. 
+
+Example to List API Versions
+
+	allPages, err := apiversions.ListVersions(networkingClient).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allVersions, err := apiversions.ExtractAPIVersions(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, version := range allVersions {
+		fmt.Printf("%+v\n", version)
+	}
+*/
+package apiversions
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/requests.go
new file mode 100644
index 000000000000..c5494fb357b3
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/requests.go
@@ -0,0 +1,22 @@
+package apiversions
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListVersions lists all the Neutron API versions available to end-users.
+func ListVersions(c *gophercloud.ServiceClient) pagination.Pager {
+	return pagination.NewPager(c, apiVersionsURL(c), func(r pagination.PageResult) pagination.Page {
+		return APIVersionPage{pagination.SinglePageBase(r)}
+	})
+}
+
+// ListVersionResources lists all of the different API resources for a
+// particular API version. Typical resources for Neutron might be: networks,
+// subnets, etc.
+func ListVersionResources(c *gophercloud.ServiceClient, v string) pagination.Pager {
+	return pagination.NewPager(c, apiInfoURL(c, v), func(r pagination.PageResult) pagination.Page {
+		return APIVersionResourcePage{pagination.SinglePageBase(r)}
+	})
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/results.go
new file mode 100644
index 000000000000..eff44855d7e0
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/results.go
@@ -0,0 +1,66 @@
+package apiversions
+
+import (
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// APIVersion represents an API version for Neutron. It contains the status of
+// the API, and its unique ID.
+type APIVersion struct {
+	Status string `json:"status"`
+	ID     string `json:"id"`
+}
+
+// APIVersionPage is the page returned by a pager when traversing over a
+// collection of API versions.
+type APIVersionPage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty checks whether an APIVersionPage struct is empty.
+func (r APIVersionPage) IsEmpty() (bool, error) {
+	is, err := ExtractAPIVersions(r)
+	return len(is) == 0, err
+}
+
+// ExtractAPIVersions takes a collection page, extracts all of the elements,
+// and returns them as a slice of APIVersion structs. It is effectively a cast.
+func ExtractAPIVersions(r pagination.Page) ([]APIVersion, error) {
+	var s struct {
+		Versions []APIVersion `json:"versions"`
+	}
+	err := (r.(APIVersionPage)).ExtractInto(&s)
+	return s.Versions, err
+}
+
+// APIVersionResource represents a generic API resource. It contains the name
+// of the resource and its plural collection name.
+type APIVersionResource struct {
+	Name       string `json:"name"`
+	Collection string `json:"collection"`
+}
+
+// APIVersionResourcePage is a concrete type which embeds the common
+// SinglePageBase struct, and is used when traversing collections of API
+// version resources.
+type APIVersionResourcePage struct {
+	pagination.SinglePageBase
+}
+
+// IsEmpty is a concrete function which indicates whether an
+// APIVersionResourcePage is empty or not.
+func (r APIVersionResourcePage) IsEmpty() (bool, error) { + is, err := ExtractVersionResources(r) + return len(is) == 0, err +} + +// ExtractVersionResources accepts a Page struct, specifically a +// APIVersionResourcePage struct, and extracts the elements into a slice of +// APIVersionResource structs. In other words, the collection is mapped into +// a relevant slice. +func ExtractVersionResources(r pagination.Page) ([]APIVersionResource, error) { + var s struct { + APIVersionResources []APIVersionResource `json:"resources"` + } + err := (r.(APIVersionResourcePage)).ExtractInto(&s) + return s.APIVersionResources, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/testing/doc.go new file mode 100644 index 000000000000..cc76de0a6238 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/testing/doc.go @@ -0,0 +1,2 @@ +// apiversions unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/testing/requests_test.go new file mode 100644 index 000000000000..5a66a2a09aef --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/testing/requests_test.go @@ -0,0 +1,183 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.0", + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0", + "rel": "self" + } + ] + } + ] +}`) + }) + + count := 0 + + apiversions.ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := apiversions.ExtractAPIVersions(page) + if err != nil { + t.Errorf("Failed to extract API versions: %v", err) + return false, err + } + + expected := []apiversions.APIVersion{ + { + Status: "CURRENT", + ID: "v2.0", + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestNonJSONCannotBeExtractedIntoAPIVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + apiversions.ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + if _, err := apiversions.ExtractAPIVersions(page); err == nil { + t.Fatalf("Expected error, got nil") + } + return true, nil + }) +} + +func TestAPIInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", 
"application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "resources": [ + { + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0/subnets", + "rel": "self" + } + ], + "name": "subnet", + "collection": "subnets" + }, + { + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0/networks", + "rel": "self" + } + ], + "name": "network", + "collection": "networks" + }, + { + "links": [ + { + "href": "http://23.253.228.211:9696/v2.0/ports", + "rel": "self" + } + ], + "name": "port", + "collection": "ports" + } + ] +} + `) + }) + + count := 0 + + apiversions.ListVersionResources(fake.ServiceClient(), "v2.0").EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := apiversions.ExtractVersionResources(page) + if err != nil { + t.Errorf("Failed to extract version resources: %v", err) + return false, err + } + + expected := []apiversions.APIVersionResource{ + { + Name: "subnet", + Collection: "subnets", + }, + { + Name: "network", + Collection: "networks", + }, + { + Name: "port", + Collection: "ports", + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestNonJSONCannotBeExtractedIntoAPIVersionResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + apiversions.ListVersionResources(fake.ServiceClient(), "v2.0").EachPage(func(page pagination.Page) (bool, error) { + if _, err := apiversions.ExtractVersionResources(page); err == nil { + t.Fatalf("Expected error, got nil") + } + return true, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/urls.go new file mode 100644 index 000000000000..0fa743776d3c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/apiversions/urls.go @@ -0,0 +1,15 @@ +package apiversions + +import ( + "strings" + + "github.com/gophercloud/gophercloud" +) + +func apiVersionsURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func apiInfoURL(c *gophercloud.ServiceClient, version string) string { + return c.Endpoint + strings.TrimRight(version, "/") + "/" +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/common/common_tests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/common/common_tests.go new file mode 100644 index 000000000000..7e1d917280cf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/common/common_tests.go @@ -0,0 +1,14 @@ +package common + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const TokenID = client.TokenID + +func ServiceClient() *gophercloud.ServiceClient { + sc := client.ServiceClient() + sc.ResourceBase = sc.Endpoint + "v2.0/" + return sc +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/delegate.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/delegate.go new file mode 100644 index 000000000000..0c43689bb809 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/delegate.go @@ -0,0 +1,41 @@ +package extensions + +import ( + "github.com/gophercloud/gophercloud" + common 
"github.com/gophercloud/gophercloud/openstack/common/extensions" + "github.com/gophercloud/gophercloud/pagination" +) + +// Extension is a single OpenStack extension. +type Extension struct { + common.Extension +} + +// GetResult wraps a GetResult from common. +type GetResult struct { + common.GetResult +} + +// ExtractExtensions interprets a Page as a slice of Extensions. +func ExtractExtensions(page pagination.Page) ([]Extension, error) { + inner, err := common.ExtractExtensions(page) + if err != nil { + return nil, err + } + outer := make([]Extension, len(inner)) + for index, ext := range inner { + outer[index] = Extension{ext} + } + return outer, nil +} + +// Get retrieves information for a specific extension using its alias. +func Get(c *gophercloud.ServiceClient, alias string) GetResult { + return GetResult{common.Get(c, alias)} +} + +// List returns a Pager which allows you to iterate over the full collection of extensions. +// It does not accept query parameters. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return common.List(c) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go new file mode 100644 index 000000000000..eda010cb0c87 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/doc.go @@ -0,0 +1,53 @@ +/* +Package external provides information and interaction with the external +extension for the OpenStack Networking service. + +Example to List Networks with External Information + + iTrue := true + networkListOpts := networks.ListOpts{} + listOpts := external.ListOptsExt{ + ListOptsBuilder: networkListOpts, + External: &iTrue, + } + + type NetworkWithExternalExt struct { + networks.Network + external.NetworkExternalExt + } + + var allNetworks []NetworkWithExternalExt + + allPages, err := networks.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Println("%+v\n", network) + } + +Example to Create a Network with External Information + + iTrue := true + networkCreateOpts := networks.CreateOpts{ + Name: "private", + AdminStateUp: &iTrue, + } + + createOpts := external.CreateOptsExt{ + networkCreateOpts, + &iTrue, + } + + network, err := networks.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package external diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go new file mode 100644 index 000000000000..ced5efed8d97 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/requests.go @@ -0,0 +1,84 @@ +package external + +import ( + "net/url" + "strconv" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +// ListOptsExt adds the external network options to the base ListOpts. +type ListOptsExt struct { + networks.ListOptsBuilder + External *bool `q:"router:external"` +} + +// ToNetworkListQuery adds the router:external option to the base network +// list options. 
+func (opts ListOptsExt) ToNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts.ListOptsBuilder) + if err != nil { + return "", err + } + + params := q.Query() + if opts.External != nil { + v := strconv.FormatBool(*opts.External) + params.Add("router:external", v) + } + + q = &url.URL{RawQuery: params.Encode()} + return q.String(), err +} + +// CreateOptsExt is the structure used when creating new external network +// resources. It embeds networks.CreateOpts and so inherits all of its required +// and optional fields, with the addition of the External field. +type CreateOptsExt struct { + networks.CreateOptsBuilder + External *bool `json:"router:external,omitempty"` +} + +// ToNetworkCreateMap adds the router:external options to the base network +// creation options. +func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + if opts.External == nil { + return base, nil + } + + networkMap := base["network"].(map[string]interface{}) + networkMap["router:external"] = opts.External + + return base, nil +} + +// UpdateOptsExt is the structure used when updating existing external network +// resources. It embeds networks.UpdateOpts and so inherits all of its required +// and optional fields, with the addition of the External field. +type UpdateOptsExt struct { + networks.UpdateOptsBuilder + External *bool `json:"router:external,omitempty"` +} + +// ToNetworkUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap() + if err != nil { + return nil, err + } + + if opts.External == nil { + return base, nil + } + + networkMap := base["network"].(map[string]interface{}) + networkMap["router:external"] = opts.External + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go new file mode 100644 index 000000000000..7cbbffdcf8a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/results.go @@ -0,0 +1,8 @@ +package external + +// NetworkExternalExt represents a decorated form of a Network with based on the +// "external-net" extension. +type NetworkExternalExt struct { + // Specifies whether the network is an external network or not. 
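+	// In list queries this attribute is sent as "router:external"
+	// (URL-encoded as "router%3Aexternal"); see ExpectedListOpts in the
+	// testing fixtures for an example.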
+ External bool `json:"router:external"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/doc.go new file mode 100644 index 000000000000..5641e7980a63 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/doc.go @@ -0,0 +1,2 @@ +// external unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/fixtures.go new file mode 100644 index 000000000000..f23bbea4a154 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/fixtures.go @@ -0,0 +1,61 @@ +package testing + +// These fixtures are here instead of in the underlying networks package +// because all network tests (including extensions) would have to +// implement the NetworkExternalExt extention for create/update tests +// to pass. + +const CreateRequest = ` +{ + "network": { + "name": "private", + "admin_state_up": true, + "router:external": false + } +}` + +const CreateResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": ["08eae331-0402-425a-923c-34f7cfe39c1b"], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": false, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "provider:segmentation_id": 9876543210, + "provider:physical_network": null, + "provider:network_type": "local", + "router:external": false + } +}` + +const UpdateRequest = ` +{ + "network": { + "name": "new_network_name", + "admin_state_up": false, + "shared": true, + "router:external": false + } +}` + +const UpdateResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": [], + "name": "new_network_name", + "admin_state_up": false, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c", + "provider:segmentation_id": 1234567890, + "provider:physical_network": null, + "provider:network_type": "local", + "router:external": false + } +}` + +const ExpectedListOpts = "?id=d32019d3-bc6e-4319-9c1d-6722fc136a22&router%3Aexternal=true" diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/requests_test.go new file mode 100644 index 000000000000..29236b89286b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/requests_test.go @@ -0,0 +1,26 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListExternal(t *testing.T) { + var iTrue bool = true + + networkListOpts := networks.ListOpts{ + ID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + } + + listOpts := external.ListOptsExt{ + ListOptsBuilder: networkListOpts, + External: &iTrue, + } + + actual, err := listOpts.ToNetworkListQuery() + th.AssertNoErr(t, err) + th.AssertEquals(t, ExpectedListOpts, actual) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/results_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/results_test.go new file mode 100644 index 000000000000..64a508824d89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external/testing/results_test.go @@ -0,0 +1,138 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/external" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + nettest "github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, nettest.ListResponse) + }) + + type NetworkWithExternalExt struct { + networks.Network + external.NetworkExternalExt + } + var actual []NetworkWithExternalExt + + allPages, err := networks.List(fake.ServiceClient(), networks.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + err = networks.ExtractNetworksInto(allPages, &actual) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "d32019d3-bc6e-4319-9c1d-6722fc136a22", actual[0].ID) + th.AssertEquals(t, true, actual[0].External) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, nettest.GetResponse) + }) + + var s struct { + networks.Network + external.NetworkExternalExt + } + + err := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "d32019d3-bc6e-4319-9c1d-6722fc136a22", s.ID) + th.AssertEquals(t, true, s.External) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateResponse) + }) + + iTrue := true + iFalse := false + networkCreateOpts := networks.CreateOpts{ + Name: "private", + AdminStateUp: &iTrue, + } + + externalCreateOpts := external.CreateOptsExt{ + CreateOptsBuilder: &networkCreateOpts, + External: &iFalse, + } + + _, err := networks.Create(fake.ServiceClient(), externalCreateOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, 
"PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdateResponse) + }) + + iTrue := true + iFalse := false + networkUpdateOpts := networks.UpdateOpts{ + Name: "new_network_name", + AdminStateUp: &iFalse, + Shared: &iTrue, + } + + externalUpdateOpts := external.UpdateOptsExt{ + UpdateOptsBuilder: &networkUpdateOpts, + External: &iFalse, + } + + _, err := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", externalUpdateOpts).Extract() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/doc.go new file mode 100644 index 000000000000..ec5d6181d601 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/doc.go @@ -0,0 +1,80 @@ +/* +Package extradhcpopts allow to work with extra DHCP functionality of Neutron ports. + +Example to Get a Port with Extra DHCP Options + + portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2" + var s struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt + } + + err := ports.Get(networkClient, portID).ExtractInto(&s) + if err != nil { + panic(err) + } + +Example to Create a Port with Extra DHCP Options + + var s struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt + } + + adminStateUp := true + portCreateOpts := ports.CreateOpts{ + Name: "dhcp-conf-port", + AdminStateUp: &adminStateUp, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + } + + createOpts := extradhcpopts.CreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + ExtraDHCPOpts: []extradhcpopts.CreateExtraDHCPOpt{ + { + OptName: "optionA", + OptValue: "valueA", + }, + }, + } + + err := ports.Create(networkClient, createOpts).ExtractInto(&s) + if err != nil { + panic(err) + } + +Example to Update a Port with Extra DHCP Options + + var s struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt + } + + portUpdateOpts := ports.UpdateOpts{ + Name: "updated-dhcp-conf-port", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + } + + value := "valueB" + updateOpts := extradhcpopts.UpdateOptsExt{ + UpdateOptsBuilder: portUpdateOpts, + ExtraDHCPOpts: []extradhcpopts.UpdateExtraDHCPOpt{ + { + OptName: "optionB", + OptValue: &value, + }, + }, + } + + portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2" + err := ports.Update(networkClient, portID, updateOpts).ExtractInto(&s) + if err != nil { + panic(err) + } +*/ +package extradhcpopts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/requests.go new file mode 100644 index 000000000000..f3eb9bc45070 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/requests.go @@ -0,0 +1,102 @@ +package extradhcpopts + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// CreateOptsExt adds extra DHCP options to the base 
ports.CreateOpts. +type CreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + ports.CreateOptsBuilder + + // ExtraDHCPOpts field is a set of DHCP options for a single port. + ExtraDHCPOpts []CreateExtraDHCPOpt `json:"extra_dhcp_opts,omitempty"` +} + +// CreateExtraDHCPOpt represents the options required to create an extra DHCP +// option on a port. +type CreateExtraDHCPOpt struct { + // OptName is the name of a DHCP option. + OptName string `json:"opt_name" required:"true"` + + // OptValue is the value of the DHCP option. + OptValue string `json:"opt_value" required:"true"` + + // IPVersion is the IP protocol version of a DHCP option. + IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` +} + +// ToPortCreateMap casts a CreateOptsExt struct to a map. +func (opts CreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + // Convert opts.ExtraDHCPOpts to a slice of maps. + if opts.ExtraDHCPOpts != nil { + extraDHCPOpts := make([]map[string]interface{}, len(opts.ExtraDHCPOpts)) + for i, opt := range opts.ExtraDHCPOpts { + b, err := gophercloud.BuildRequestBody(opt, "") + if err != nil { + return nil, err + } + extraDHCPOpts[i] = b + } + port["extra_dhcp_opts"] = extraDHCPOpts + } + + return base, nil +} + +// UpdateOptsExt adds extra DHCP options to the base ports.UpdateOpts. +type UpdateOptsExt struct { + // UpdateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Update operation in this package. + ports.UpdateOptsBuilder + + // ExtraDHCPOpts field is a set of DHCP options for a single port. + ExtraDHCPOpts []UpdateExtraDHCPOpt `json:"extra_dhcp_opts,omitempty"` +} + +// UpdateExtraDHCPOpt represents the options required to update an extra DHCP +// option on a port. +type UpdateExtraDHCPOpt struct { + // OptName is the name of a DHCP option. + OptName string `json:"opt_name" required:"true"` + + // OptValue is the value of the DHCP option. + OptValue *string `json:"opt_value"` + + // IPVersion is the IP protocol version of a DHCP option. + IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` +} + +// ToPortUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + // Convert opts.ExtraDHCPOpts to a slice of maps. 
+ if opts.ExtraDHCPOpts != nil { + extraDHCPOpts := make([]map[string]interface{}, len(opts.ExtraDHCPOpts)) + for i, opt := range opts.ExtraDHCPOpts { + b, err := gophercloud.BuildRequestBody(opt, "") + if err != nil { + return nil, err + } + extraDHCPOpts[i] = b + } + port["extra_dhcp_opts"] = extraDHCPOpts + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/results.go new file mode 100644 index 000000000000..8e3132ea4a0d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts/results.go @@ -0,0 +1,20 @@ +package extradhcpopts + +// ExtraDHCPOptsExt is a struct that contains different DHCP options for a +// single port. +type ExtraDHCPOptsExt struct { + ExtraDHCPOpts []ExtraDHCPOpt `json:"extra_dhcp_opts"` +} + +// ExtraDHCPOpt represents a single set of extra DHCP options for a single port. +type ExtraDHCPOpt struct { + // OptName is the name of a single DHCP option. + OptName string `json:"opt_name"` + + // OptValue is the value of a single DHCP option. + OptValue string `json:"opt_value"` + + // IPVersion is the IP protocol version of a single DHCP option. + // Valid value is 4 or 6. Default is 4. + IPVersion int `json:"ip_version"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go new file mode 100644 index 000000000000..3ec450a7b3db --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/doc.go @@ -0,0 +1,3 @@ +// Package fwaas provides information and interaction with the Firewall +// as a Service extension for the OpenStack Networking service. +package fwaas diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go new file mode 100644 index 000000000000..c01070edc8c6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/doc.go @@ -0,0 +1,60 @@ +/* +Package firewalls allows management and retrieval of firewalls from the +OpenStack Networking Service. 
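+
+Example to Get a Firewall (a minimal sketch, assuming an authenticated
+networkClient and an existing firewall ID)
+
+	firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee"
+
+	firewall, err := firewalls.Get(networkClient, firewallID).Extract()
+	if err != nil {
+		panic(err)
+	}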
+ +Example to List Firewalls + + listOpts := firewalls.ListOpts{ + TenantID: "tenant-id", + } + + allPages, err := firewalls.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFirewalls, err := firewalls.ExtractFirewalls(allPages) + if err != nil { + panic(err) + } + + for _, fw := range allFirewalls { + fmt.Printf("%+v\n", fw) + } + +Example to Create a Firewall + + createOpts := firewalls.CreateOpts{ + Name: "firewall_1", + Description: "A firewall", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + AdminStateUp: gophercloud.Enabled, + } + + firewall, err := firewalls.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Firewall + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + + updateOpts := firewalls.UpdateOpts{ + AdminStateUp: gophercloud.Disabled, + } + + firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Firewall + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + err := firewalls.Delete(networkClient, firewallID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package firewalls diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go new file mode 100644 index 000000000000..dd92bb20dbe4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/errors.go @@ -0,0 +1,11 @@ +package firewalls + +import "fmt" + +func err(str string) error { + return fmt.Errorf("%s", str) +} + +var ( + errPolicyRequired = err("A policy ID is required") +) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go new file mode 100644 index 000000000000..cbd6c1fb0d9c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/requests.go @@ -0,0 +1,139 @@ +package firewalls + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToFirewallListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall attributes you want to see returned. SortKey allows you to sort +// by a particular firewall attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + AdminStateUp bool `q:"admin_state_up"` + Shared bool `q:"shared"` + PolicyID string `q:"firewall_policy_id"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToFirewallListQuery formats a ListOpts into a query string. 
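+//
+// A minimal sketch of its use (the exact string depends on which fields of
+// ListOpts are non-zero; gophercloud.BuildQueryString skips zero-valued,
+// non-required fields):
+//
+//	opts := firewalls.ListOpts{TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b"}
+//	query, err := opts.ToFirewallListQuery()
+//	// query is a fragment such as "?tenant_id=b4eedccc6fb74fa8a7ad6b08382b852b"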
+func (opts ListOpts) ToFirewallListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// firewalls. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewalls that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToFirewallListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return FirewallPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToFirewallCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall. +type CreateOpts struct { + PolicyID string `json:"firewall_policy_id" required:"true"` + // TenantID specifies a tenant to own the firewall. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Shared *bool `json:"shared,omitempty"` +} + +// ToFirewallCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToFirewallCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "firewall") +} + +// Create accepts a CreateOpts struct and uses the values to create a new firewall. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFirewallCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular firewall based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToFirewallUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a firewall. +type UpdateOpts struct { + PolicyID string `json:"firewall_policy_id" required:"true"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Shared *bool `json:"shared,omitempty"` +} + +// ToFirewallUpdateMap casts a CreateOpts struct to a map. +func (opts UpdateOpts) ToFirewallUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "firewall") +} + +// Update allows firewalls to be updated. 
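+//
+// Note that UpdateOpts marks PolicyID as required, so it must be supplied even
+// when only other attributes change. A minimal sketch, assuming an
+// authenticated networkClient and an existing firewallID:
+//
+//	updateOpts := firewalls.UpdateOpts{
+//		PolicyID:    "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c",
+//		Description: "updated fw",
+//	}
+//
+//	fw, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract()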
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToFirewallUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular firewall based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go new file mode 100644 index 000000000000..9543f0fae4e3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/results.go @@ -0,0 +1,96 @@ +package firewalls + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Firewall is an OpenStack firewall. +type Firewall struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AdminStateUp bool `json:"admin_state_up"` + Status string `json:"status"` + PolicyID string `json:"firewall_policy_id"` + TenantID string `json:"tenant_id"` + ProjectID string `json:"project_id"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a firewall. +func (r commonResult) Extract() (*Firewall, error) { + var s Firewall + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "firewall") +} + +func ExtractFirewallsInto(r pagination.Page, v interface{}) error { + return r.(FirewallPage).Result.ExtractIntoSlicePtr(v, "firewalls") +} + +// FirewallPage is the page returned by a pager when traversing over a +// collection of firewalls. +type FirewallPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of firewalls has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r FirewallPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"firewalls_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a FirewallPage struct is empty. +func (r FirewallPage) IsEmpty() (bool, error) { + is, err := ExtractFirewalls(r) + return len(is) == 0, err +} + +// ExtractFirewalls accepts a Page struct, specifically a FirewallPage struct, +// and extracts the elements into a slice of Firewall structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractFirewalls(r pagination.Page) ([]Firewall, error) { + var s []Firewall + err := ExtractFirewallsInto(r, &s) + return s, err +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret it as a Firewall. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret it as a Firewall. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the operation succeeded or failed. 
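+//
+// A minimal sketch of checking the outcome, assuming an authenticated
+// networkClient and an existing firewallID:
+//
+//	res := firewalls.Delete(networkClient, firewallID)
+//	if err := res.ExtractErr(); err != nil {
+//		// the delete request failed
+//	}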
+type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret it as a Firewall. +type CreateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/testing/doc.go new file mode 100644 index 000000000000..82ebf979baf1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/testing/doc.go @@ -0,0 +1,2 @@ +// firewalls unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/testing/requests_test.go new file mode 100644 index 000000000000..13eca65b69ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/testing/requests_test.go @@ -0,0 +1,341 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewalls":[ + { + "status": "ACTIVE", + "name": "fw1", + "admin_state_up": false, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + "description": "OpenStack firewall 1" + }, + { + "status": "PENDING_UPDATE", + "name": "fw2", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e299", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f99", + "description": "OpenStack firewall 2" + } + ] +} + `) + }) + + count := 0 + + firewalls.List(fake.ServiceClient(), firewalls.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := firewalls.ExtractFirewalls(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []firewalls.Firewall{ + { + Status: "ACTIVE", + Name: "fw1", + AdminStateUp: false, + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + PolicyID: "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + ID: "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + Description: "OpenStack firewall 1", + }, + { + Status: "PENDING_UPDATE", + Name: "fw2", + AdminStateUp: true, + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + PolicyID: "34be8c83-4d42-4dca-a74e-b77fffb8e299", + ID: "fb5b5315-64f6-4ea3-8e58-981cc37c6f99", + Description: "OpenStack firewall 2", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} 
+ +func TestListWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewalls":[ + { + "status": "ACTIVE", + "name": "fw1", + "admin_state_up": false, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + "description": "OpenStack firewall 1", + "router_ids": ["abcd1234"] + }, + { + "status": "PENDING_UPDATE", + "name": "fw2", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e299", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f99", + "description": "OpenStack firewall 2" + } + ] +} + `) + }) + + type FirewallsWithExt struct { + firewalls.Firewall + routerinsertion.FirewallExt + } + + allPages, err := firewalls.List(fake.ServiceClient(), nil).AllPages() + th.AssertNoErr(t, err) + + var actual []FirewallsWithExt + err = firewalls.ExtractFirewallsInto(allPages, &actual) + th.AssertNoErr(t, err) + th.AssertEquals(t, 2, len(actual)) + th.AssertEquals(t, "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", actual[0].ID) + th.AssertEquals(t, "abcd1234", actual[0].RouterIDs[0]) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall":{ + "status": "PENDING_CREATE", + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c" + } +} + `) + }) + + options := firewalls.CreateOpts{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "fw", + Description: "OpenStack firewall", + AdminStateUp: gophercloud.Enabled, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + _, err := firewalls.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/fb5b5315-64f6-4ea3-8e58-981cc37c6f61", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": { + "status": "ACTIVE", + "name": "fw", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + "description": "OpenStack firewall" + } +} + `) + }) + + fw, err := firewalls.Get(fake.ServiceClient(), 
"fb5b5315-64f6-4ea3-8e58-981cc37c6f61").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "ACTIVE", fw.Status) + th.AssertEquals(t, "fw", fw.Name) + th.AssertEquals(t, "OpenStack firewall", fw.Description) + th.AssertEquals(t, true, fw.AdminStateUp) + th.AssertEquals(t, "34be8c83-4d42-4dca-a74e-b77fffb8e28a", fw.PolicyID) + th.AssertEquals(t, "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", fw.ID) + th.AssertEquals(t, "b4eedccc6fb74fa8a7ad6b08382b852b", fw.TenantID) +} + +func TestGetWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/fb5b5315-64f6-4ea3-8e58-981cc37c6f61", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": { + "status": "ACTIVE", + "name": "fw", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "34be8c83-4d42-4dca-a74e-b77fffb8e28a", + "id": "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", + "description": "OpenStack firewall", + "router_ids": ["abcd1234"] + } +} + `) + }) + + var fw struct { + firewalls.Firewall + routerinsertion.FirewallExt + } + + err := firewalls.Get(fake.ServiceClient(), "fb5b5315-64f6-4ea3-8e58-981cc37c6f61").ExtractInto(&fw) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "ACTIVE", fw.Status) + th.AssertEquals(t, "fw", fw.Name) + th.AssertEquals(t, "OpenStack firewall", fw.Description) + th.AssertEquals(t, true, fw.AdminStateUp) + th.AssertEquals(t, "34be8c83-4d42-4dca-a74e-b77fffb8e28a", fw.PolicyID) + th.AssertEquals(t, "fb5b5315-64f6-4ea3-8e58-981cc37c6f61", fw.ID) + th.AssertEquals(t, "b4eedccc6fb74fa8a7ad6b08382b852b", fw.TenantID) + th.AssertEquals(t, "abcd1234", fw.RouterIDs[0]) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/ea5b5315-64f6-4ea3-8e58-981cc37c6576", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "updated fw", + "admin_state_up":false, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": { + "status": "ACTIVE", + "name": "fw", + "admin_state_up": false, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "id": "ea5b5315-64f6-4ea3-8e58-981cc37c6576", + "description": "OpenStack firewall" + } +} + `) + }) + + options := firewalls.UpdateOpts{ + Name: "fw", + Description: "updated fw", + AdminStateUp: gophercloud.Disabled, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + _, err := firewalls.Update(fake.ServiceClient(), "ea5b5315-64f6-4ea3-8e58-981cc37c6576", options).Extract() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := 
firewalls.Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go new file mode 100644 index 000000000000..807ea1ab6563 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls/urls.go @@ -0,0 +1,16 @@ +package firewalls + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewalls" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go new file mode 100644 index 000000000000..ae824491f18d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/doc.go @@ -0,0 +1,84 @@ +/* +Package policies allows management and retrieval of Firewall Policies in the +OpenStack Networking Service. + +Example to List Policies + + listOpts := policies.ListOpts{ + TenantID: "966b3c7d36a24facaf20b7e458bf2192", + } + + allPages, err := policies.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPolicies, err := policies.ExtractPolicies(allPages) + if err != nil { + panic(err) + } + + for _, policy := range allPolicies { + fmt.Printf("%+v\n", policy) + } + +Example to Create a Policy + + createOpts := policies.CreateOpts{ + Name: "policy_1", + Description: "A policy", + Rules: []string{ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "7c4f087a-ed46-4ea8-8040-11ca460a61c0", + } + } + + policy, err := policies.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Policy + + policyID := "38aee955-6283-4279-b091-8b9c828000ec" + + updateOpts := policies.UpdateOpts{ + Description: "New Description", + } + + policy, err := policies.Update(networkClient, policyID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Policy + + policyID := "38aee955-6283-4279-b091-8b9c828000ec" + err := policies.Delete(networkClient, policyID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Add a Rule to a Policy + + policyID := "38aee955-6283-4279-b091-8b9c828000ec" + ruleOpts := policies.InsertRuleOpts{ + ID: "98a58c87-76be-ae7c-a74e-b77fffb88d95", + } + + policy, err := policies.AddRule(networkClient, policyID, ruleOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Rule from a Policy + + policyID := "38aee955-6283-4279-b091-8b9c828000ec" + ruleID := "98a58c87-76be-ae7c-a74e-b77fffb88d95", + + policy, err := policies.RemoveRule(networkClient, policyID, ruleID).Extract() + if err != nil { + panic(err) + } +*/ +package policies diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go new file mode 100644 index 000000000000..40ab7a8c46fa --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/requests.go @@ -0,0 +1,179 @@ +package policies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the firewall policy attributes you want to see returned. SortKey allows you +// to sort by a particular firewall policy attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + Shared *bool `q:"shared"` + Audited *bool `q:"audited"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall policies. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall policies that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToFirewallPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall policy. +type CreateOpts struct { + // TenantID specifies a tenant to own the firewall. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + Shared *bool `json:"shared,omitempty"` + Audited *bool `json:"audited,omitempty"` + Rules []string `json:"firewall_rules,omitempty"` +} + +// ToFirewallPolicyCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToFirewallPolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "firewall_policy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// firewall policy. 
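+//
+// A minimal sketch, assuming an authenticated networkClient; gophercloud.Enabled
+// and gophercloud.Disabled are convenience *bool values for the flag fields:
+//
+//	createOpts := policies.CreateOpts{
+//		Name:    "policy",
+//		Audited: gophercloud.Enabled,
+//		Shared:  gophercloud.Disabled,
+//		Rules:   []string{"98a58c87-76be-ae7c-a74e-b77fffb88d95"},
+//	}
+//
+//	policy, err := policies.Create(networkClient, createOpts).Extract()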
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToFirewallPolicyCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular firewall policy based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToFirewallPolicyUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall policy.
+type UpdateOpts struct {
+	Name        string   `json:"name,omitempty"`
+	Description string   `json:"description,omitempty"`
+	Shared      *bool    `json:"shared,omitempty"`
+	Audited     *bool    `json:"audited,omitempty"`
+	Rules       []string `json:"firewall_rules,omitempty"`
+}
+
+// ToFirewallPolicyUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToFirewallPolicyUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall_policy")
+}
+
+// Update allows firewall policies to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToFirewallPolicyUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular firewall policy based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// InsertRuleOptsBuilder allows extensions to add additional parameters to the
+// InsertRule request.
+type InsertRuleOptsBuilder interface {
+	ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error)
+}
+
+// InsertRuleOpts contains the values used when updating a policy's rules.
+type InsertRuleOpts struct {
+	ID           string `json:"firewall_rule_id" required:"true"`
+	BeforeRuleID string `json:"insert_before,omitempty"`
+	AfterRuleID  string `json:"insert_after,omitempty"`
+}
+
+// ToFirewallPolicyInsertRuleMap casts an InsertRuleOpts struct to a map.
+func (opts InsertRuleOpts) ToFirewallPolicyInsertRuleMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "")
+}
+
+// AddRule will add a rule to a policy.
+func AddRule(c *gophercloud.ServiceClient, id string, opts InsertRuleOptsBuilder) (r InsertRuleResult) {
+	b, err := opts.ToFirewallPolicyInsertRuleMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(insertURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// RemoveRule will remove a rule from a policy.
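+//
+// A minimal usage sketch, assuming an authenticated networkClient and existing
+// policy and rule IDs; Extract returns the updated Policy. (To control where a
+// rule lands when it is added, see the BeforeRuleID and AfterRuleID fields of
+// InsertRuleOpts above.)
+//
+//	policy, err := policies.RemoveRule(networkClient, policyID, ruleID).Extract()
+//	if err != nil {
+//		// handle the error
+//	}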
+func RemoveRule(c *gophercloud.ServiceClient, id, ruleID string) (r RemoveRuleResult) { + b := map[string]interface{}{"firewall_rule_id": ruleID} + _, r.Err = c.Put(removeURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go new file mode 100644 index 000000000000..495cef2c0e73 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/results.go @@ -0,0 +1,104 @@ +package policies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Policy is a firewall policy. +type Policy struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + TenantID string `json:"tenant_id"` + ProjectID string `json:"project_id"` + Audited bool `json:"audited"` + Shared bool `json:"shared"` + Rules []string `json:"firewall_rules,omitempty"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a firewall policy. +func (r commonResult) Extract() (*Policy, error) { + var s struct { + Policy *Policy `json:"firewall_policy"` + } + err := r.ExtractInto(&s) + return s.Policy, err +} + +// PolicyPage is the page returned by a pager when traversing over a +// collection of firewall policies. +type PolicyPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of firewall policies has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r PolicyPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"firewall_policies_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PolicyPage struct is empty. +func (r PolicyPage) IsEmpty() (bool, error) { + is, err := ExtractPolicies(r) + return len(is) == 0, err +} + +// ExtractPolicies accepts a Page struct, specifically a Policy struct, +// and extracts the elements into a slice of Policy structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPolicies(r pagination.Page) ([]Policy, error) { + var s struct { + Policies []Policy `json:"firewall_policies"` + } + err := (r.(PolicyPage)).ExtractInto(&s) + return s.Policies, err +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Policy. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its +// Extract method to interpret it as a Policy. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Policy. +type CreateResult struct { + commonResult +} + +// InsertRuleResult represents the result of an InsertRule operation. Call its +// Extract method to interpret it as a Policy. 
+type InsertRuleResult struct { + commonResult +} + +// RemoveRuleResult represents the result of a RemoveRule operation. Call its +// Extract method to interpret it as a Policy. +type RemoveRuleResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/testing/doc.go new file mode 100644 index 000000000000..a61f5488e31e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/testing/doc.go @@ -0,0 +1,2 @@ +// policies unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/testing/requests_test.go new file mode 100644 index 000000000000..11b9848f5297 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/testing/requests_test.go @@ -0,0 +1,274 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_policies": [ + { + "name": "policy1", + "firewall_rules": [ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": true, + "shared": false, + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy 1" + }, + { + "name": "policy2", + "firewall_rules": [ + "03d2a6ad-633f-431a-8463-4370d06a22c8" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "shared": true, + "id": "c854fab5-bdaf-4a86-9359-78de93e5df01", + "description": "Firewall policy 2" + } + ] +} + `) + }) + + count := 0 + + policies.List(fake.ServiceClient(), policies.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := policies.ExtractPolicies(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []policies.Policy{ + { + Name: "policy1", + Rules: []string{ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6", + }, + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Audited: true, + Shared: false, + ID: "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + Description: "Firewall policy 1", + }, + { + Name: "policy2", + Rules: []string{ + "03d2a6ad-633f-431a-8463-4370d06a22c8", + }, + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Audited: false, + Shared: true, + ID: "c854fab5-bdaf-4a86-9359-78de93e5df01", + Description: "Firewall policy 2", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func 
TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32" + ], + "description": "Firewall policy", + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": true, + "shared": false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy" + } +} + `) + }) + + options := policies.CreateOpts{ + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Name: "policy", + Description: "Firewall policy", + Shared: gophercloud.Disabled, + Audited: gophercloud.Enabled, + Rules: []string{ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32", + }, + } + + _, err := policies.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies/bcab5315-64f6-4ea3-8e58-981cc37c6f61", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_policy":{ + "name": "www", + "firewall_rules": [ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6", + "03d2a6ad-633f-431a-8463-4370d06a22c8" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy web" + } +} + `) + }) + + policy, err := policies.Get(fake.ServiceClient(), "bcab5315-64f6-4ea3-8e58-981cc37c6f61").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "www", policy.Name) + th.AssertEquals(t, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", policy.ID) + th.AssertEquals(t, "Firewall policy web", policy.Description) + th.AssertEquals(t, 3, len(policy.Rules)) + th.AssertEquals(t, "75452b36-268e-4e75-aaf4-f0e7ed50bc97", policy.Rules[0]) + th.AssertEquals(t, "c9e77ca0-1bc8-497d-904d-948107873dc6", policy.Rules[1]) + th.AssertEquals(t, "03d2a6ad-633f-431a-8463-4370d06a22c8", policy.Rules[2]) + th.AssertEquals(t, "9145d91459d248b1b02fdaca97c6a75d", policy.TenantID) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies/f2b08c1e-aa81-4668-8ae1-1401bcb0576c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32" + ], + 
"description": "Firewall policy" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_policy":{ + "name": "policy", + "firewall_rules": [ + "75452b36-268e-4e75-aaf4-f0e7ed50bc97", + "c9e77ca0-1bc8-497d-904d-948107873dc6", + "03d2a6ad-633f-431a-8463-4370d06a22c8" + ], + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "audited": false, + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "Firewall policy" + } +} + `) + }) + + options := policies.UpdateOpts{ + Name: "policy", + Description: "Firewall policy", + Rules: []string{ + "98a58c87-76be-ae7c-a74e-b77fffb88d95", + "11a58c87-76be-ae7c-a74e-b77fffb88a32", + }, + } + + _, err := policies.Update(fake.ServiceClient(), "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", options).Extract() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_policies/4ec89077-d057-4a2b-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := policies.Delete(fake.ServiceClient(), "4ec89077-d057-4a2b-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go new file mode 100644 index 000000000000..c252b79dd0cd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/policies/urls.go @@ -0,0 +1,26 @@ +package policies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewall_policies" + insertPath = "insert_rule" + removePath = "remove_rule" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func insertURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, insertPath) +} + +func removeURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, removePath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go new file mode 100644 index 000000000000..4f0a779eec95 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/doc.go @@ -0,0 +1,68 @@ +/* +Package routerinsertion implements the fwaasrouterinsertion Firewall extension. +It is used to manage the router information of a firewall. 
+ +Example to List Firewalls with Router Information + + type FirewallsWithRouters struct { + firewalls.Firewall + routerinsertion.FirewallExt + } + + var allFirewalls []FirewallsWithRouters + + allPages, err := firewalls.List(networkClient, nil).AllPages() + if err != nil { + panic(err) + } + + err = firewalls.ExtractFirewallsInto(allPages, &allFirewalls) + if err != nil { + panic(err) + } + + for _, fw := range allFirewalls { + fmt.Printf("%+v\n", fw) + } + +Example to Create a Firewall with a Router + + firewallCreateOpts := firewalls.CreateOpts{ + Name: "firewall_1", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + createOpts := routerinsertion.CreateOptsExt{ + CreateOptsBuilder: firewallCreateOpts, + RouterIDs: []string{ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", + }, + } + + firewall, err := firewalls.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Firewall with a Router + + firewallID := "a6917946-38ab-4ffd-a55a-26c0980ce5ee" + + firewallUpdateOpts := firewalls.UpdateOpts{ + Description: "updated firewall", + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + + updateOpts := routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: firewallUpdateOpts, + RouterIDs: []string{ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8", + }, + } + + firewall, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract() + if err != nil { + panic(err) + } +*/ +package routerinsertion diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go new file mode 100644 index 000000000000..b1f6d76e387f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/requests.go @@ -0,0 +1,43 @@ +package routerinsertion + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" +) + +// CreateOptsExt adds the RouterIDs option to the base CreateOpts. +type CreateOptsExt struct { + firewalls.CreateOptsBuilder + RouterIDs []string `json:"router_ids"` +} + +// ToFirewallCreateMap adds router_ids to the base firewall creation options. +func (opts CreateOptsExt) ToFirewallCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToFirewallCreateMap() + if err != nil { + return nil, err + } + + firewallMap := base["firewall"].(map[string]interface{}) + firewallMap["router_ids"] = opts.RouterIDs + + return base, nil +} + +// UpdateOptsExt adds the RouterIDs option to the base UpdateOpts. +type UpdateOptsExt struct { + firewalls.UpdateOptsBuilder + RouterIDs []string `json:"router_ids"` +} + +// ToFirewallUpdateMap adds router_ids to the base firewall update options. 
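+//
+// RouterIDs has no omitempty tag, so an explicitly empty slice is serialized as
+// "router_ids": [] rather than being dropped (compare TestUpdateWithNoRouters
+// in the testing subpackage). A minimal sketch, assuming an authenticated
+// networkClient, an existing firewallID, and a firewalls.UpdateOpts value in
+// firewallUpdateOpts:
+//
+//	updateOpts := routerinsertion.UpdateOptsExt{
+//		UpdateOptsBuilder: firewallUpdateOpts,
+//		RouterIDs:         []string{},
+//	}
+//
+//	fw, err := firewalls.Update(networkClient, firewallID, updateOpts).Extract()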
+func (opts UpdateOptsExt) ToFirewallUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToFirewallUpdateMap() + if err != nil { + return nil, err + } + + firewallMap := base["firewall"].(map[string]interface{}) + firewallMap["router_ids"] = opts.RouterIDs + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go new file mode 100644 index 000000000000..85c288e51e67 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/results.go @@ -0,0 +1,7 @@ +package routerinsertion + +// FirewallExt is an extension to the base Firewall object +type FirewallExt struct { + // RouterIDs are the routers that the firewall is attached to. + RouterIDs []string `json:"router_ids"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/testing/doc.go new file mode 100644 index 000000000000..86e710f6e198 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/testing/doc.go @@ -0,0 +1,2 @@ +// routerinsertion unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/testing/requests_test.go new file mode 100644 index 000000000000..ac7a2be8d260 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion/testing/requests_test.go @@ -0,0 +1,235 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/firewalls" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/routerinsertion" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "router_ids": [ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8" + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall":{ + "status": "PENDING_CREATE", + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c" + } +} + `) + }) + + firewallCreateOpts := firewalls.CreateOpts{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "fw", + Description: "OpenStack 
firewall", + AdminStateUp: gophercloud.Enabled, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + createOpts := routerinsertion.CreateOptsExt{ + CreateOptsBuilder: firewallCreateOpts, + RouterIDs: []string{"8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8"}, + } + + _, err := firewalls.Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) +} + +func TestCreateWithNoRouters(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "router_ids": [] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall":{ + "status": "PENDING_CREATE", + "name": "fw", + "description": "OpenStack firewall", + "admin_state_up": true, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c" + } +} + `) + }) + + firewallCreateOpts := firewalls.CreateOpts{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "fw", + Description: "OpenStack firewall", + AdminStateUp: gophercloud.Enabled, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + createOpts := routerinsertion.CreateOptsExt{ + CreateOptsBuilder: firewallCreateOpts, + RouterIDs: []string{}, + } + + _, err := firewalls.Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewalls/ea5b5315-64f6-4ea3-8e58-981cc37c6576", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "updated fw", + "admin_state_up":false, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "router_ids": [ + "8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8" + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": { + "status": "ACTIVE", + "name": "fw", + "admin_state_up": false, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "id": "ea5b5315-64f6-4ea3-8e58-981cc37c6576", + "description": "OpenStack firewall" + } +} + `) + }) + + firewallUpdateOpts := firewalls.UpdateOpts{ + Name: "fw", + Description: "updated fw", + AdminStateUp: gophercloud.Disabled, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + updateOpts := routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: firewallUpdateOpts, + RouterIDs: []string{"8a3a0d6a-34b5-4a92-b65d-6375a4c1e9e8"}, + } + + _, err := firewalls.Update(fake.ServiceClient(), "ea5b5315-64f6-4ea3-8e58-981cc37c6576", updateOpts).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdateWithNoRouters(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
th.Mux.HandleFunc("/v2.0/fw/firewalls/ea5b5315-64f6-4ea3-8e58-981cc37c6576", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall":{ + "name": "fw", + "description": "updated fw", + "admin_state_up":false, + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "router_ids": [] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall": { + "status": "ACTIVE", + "name": "fw", + "admin_state_up": false, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "firewall_policy_id": "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + "id": "ea5b5315-64f6-4ea3-8e58-981cc37c6576", + "description": "OpenStack firewall" + } +} + `) + }) + + firewallUpdateOpts := firewalls.UpdateOpts{ + Name: "fw", + Description: "updated fw", + AdminStateUp: gophercloud.Disabled, + PolicyID: "19ab8c87-4a32-4e6a-a74e-b77fffb89a0c", + } + updateOpts := routerinsertion.UpdateOptsExt{ + UpdateOptsBuilder: firewallUpdateOpts, + RouterIDs: []string{}, + } + + _, err := firewalls.Update(fake.ServiceClient(), "ea5b5315-64f6-4ea3-8e58-981cc37c6576", updateOpts).Extract() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go new file mode 100644 index 000000000000..3351a3e5c95a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/doc.go @@ -0,0 +1,64 @@ +/* +Package rules enables management and retrieval of Firewall Rules in the +OpenStack Networking Service. 
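+
+Rules are created with one of the Protocol constants defined in this package;
+rules.ProtocolAny is sent to the API as a null protocol, which the service
+treats as matching any protocol.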
+
+Example to List Rules
+
+	listOpts := rules.ListOpts{
+		Protocol: rules.ProtocolAny,
+	}
+
+	allPages, err := rules.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allRules, err := rules.ExtractRules(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, rule := range allRules {
+		fmt.Printf("%+v\n", rule)
+	}
+
+Example to Create a Rule
+
+	createOpts := rules.CreateOpts{
+		Action:               "allow",
+		Protocol:             rules.ProtocolTCP,
+		Description:          "ssh",
+		DestinationPort:      "22",
+		DestinationIPAddress: "192.168.1.0/24",
+	}
+
+	rule, err := rules.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Rule
+
+	ruleID := "f03bd950-6c56-4f5e-a307-45967078f507"
+	newPort := "80"
+	newDescription := "http"
+
+	updateOpts := rules.UpdateOpts{
+		Description:     &newDescription,
+		DestinationPort: &newPort,
+	}
+
+	rule, err := rules.Update(networkClient, ruleID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Rule
+
+	ruleID := "f03bd950-6c56-4f5e-a307-45967078f507"
+	err := rules.Delete(networkClient, ruleID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package rules
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
new file mode 100644
index 000000000000..0b29d39fd9e1
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/errors.go
@@ -0,0 +1,12 @@
+package rules
+
+import "fmt"
+
+func err(str string) error {
+	return fmt.Errorf("%s", str)
+}
+
+var (
+	errProtocolRequired = err("A protocol is required (tcp, udp, icmp or any)")
+	errActionRequired   = err("An action is required (allow or deny)")
+)
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
new file mode 100644
index 000000000000..17979b637bba
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/requests.go
@@ -0,0 +1,190 @@
+package rules
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type (
+	// Protocol represents a valid rule protocol.
+	Protocol string
+)
+
+const (
+	// ProtocolAny is to allow any protocol.
+	ProtocolAny Protocol = "any"
+
+	// ProtocolICMP is to allow the ICMP protocol.
+	ProtocolICMP Protocol = "icmp"
+
+	// ProtocolTCP is to allow the TCP protocol.
+	ProtocolTCP Protocol = "tcp"
+
+	// ProtocolUDP is to allow the UDP protocol.
+	ProtocolUDP Protocol = "udp"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToRuleListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Firewall rule attributes you want to see returned. SortKey allows you to
+// sort by a particular firewall rule attribute. SortDir sets the direction, and
+// is either `asc' or `desc'. Marker and Limit are used for pagination.
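+//
+// A short, hypothetical example of filtering (assuming networkClient is an
+// authenticated *gophercloud.ServiceClient for the networking service):
+//
+//	listOpts := rules.ListOpts{Action: "deny", Enabled: true}
+//
+//	allPages, err := rules.List(networkClient, listOpts).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}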
+type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Name string `q:"name"` + Description string `q:"description"` + Protocol string `q:"protocol"` + Action string `q:"action"` + IPVersion int `q:"ip_version"` + SourceIPAddress string `q:"source_ip_address"` + DestinationIPAddress string `q:"destination_ip_address"` + SourcePort string `q:"source_port"` + DestinationPort string `q:"destination_port"` + Enabled bool `q:"enabled"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToRuleListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRuleListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// firewall rules. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those firewall rules that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + + if opts != nil { + query, err := opts.ToRuleListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return RulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new firewall rule. +type CreateOpts struct { + Protocol Protocol `json:"protocol" required:"true"` + Action string `json:"action" required:"true"` + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + Name string `json:"name,omitempty"` + Description string `json:"description,omitempty"` + IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"` + SourceIPAddress string `json:"source_ip_address,omitempty"` + DestinationIPAddress string `json:"destination_ip_address,omitempty"` + SourcePort string `json:"source_port,omitempty"` + DestinationPort string `json:"destination_port,omitempty"` + Shared *bool `json:"shared,omitempty"` + Enabled *bool `json:"enabled,omitempty"` +} + +// ToRuleCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToRuleCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "firewall_rule") + if err != nil { + return nil, err + } + + if m := b["firewall_rule"].(map[string]interface{}); m["protocol"] == "any" { + m["protocol"] = nil + } + + return b, nil +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// firewall rule. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular firewall rule based on its unique ID. 
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToRuleUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a firewall rule.
+// These fields are all pointers so that unset fields will not cause the
+// existing Rule attribute to be removed.
+type UpdateOpts struct {
+	Protocol             *string                `json:"protocol,omitempty"`
+	Action               *string                `json:"action,omitempty"`
+	Name                 *string                `json:"name,omitempty"`
+	Description          *string                `json:"description,omitempty"`
+	IPVersion            *gophercloud.IPVersion `json:"ip_version,omitempty"`
+	SourceIPAddress      *string                `json:"source_ip_address,omitempty"`
+	DestinationIPAddress *string                `json:"destination_ip_address,omitempty"`
+	SourcePort           *string                `json:"source_port,omitempty"`
+	DestinationPort      *string                `json:"destination_port,omitempty"`
+	Shared               *bool                  `json:"shared,omitempty"`
+	Enabled              *bool                  `json:"enabled,omitempty"`
+}
+
+// ToRuleUpdateMap casts a UpdateOpts struct to a map.
+func (opts UpdateOpts) ToRuleUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "firewall_rule")
+}
+
+// Update allows firewall rules to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToRuleUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular firewall rule based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
new file mode 100644
index 000000000000..82bf4a36a881
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/results.go
@@ -0,0 +1,100 @@
+package rules
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Rule represents a firewall rule.
+type Rule struct {
+	ID                   string `json:"id"`
+	Name                 string `json:"name,omitempty"`
+	Description          string `json:"description,omitempty"`
+	Protocol             string `json:"protocol"`
+	Action               string `json:"action"`
+	IPVersion            int    `json:"ip_version,omitempty"`
+	SourceIPAddress      string `json:"source_ip_address,omitempty"`
+	DestinationIPAddress string `json:"destination_ip_address,omitempty"`
+	SourcePort           string `json:"source_port,omitempty"`
+	DestinationPort      string `json:"destination_port,omitempty"`
+	Shared               bool   `json:"shared,omitempty"`
+	Enabled              bool   `json:"enabled,omitempty"`
+	PolicyID             string `json:"firewall_policy_id"`
+	Position             int    `json:"position"`
+	TenantID             string `json:"tenant_id"`
+	ProjectID            string `json:"project_id"`
+}
+
+// RulePage is the page returned by a pager when traversing over a
+// collection of firewall rules.
+type RulePage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of firewall rules has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL. +func (r RulePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"firewall_rules_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a RulePage struct is empty. +func (r RulePage) IsEmpty() (bool, error) { + is, err := ExtractRules(r) + return len(is) == 0, err +} + +// ExtractRules accepts a Page struct, specifically a RulePage struct, +// and extracts the elements into a slice of Rule structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRules(r pagination.Page) ([]Rule, error) { + var s struct { + Rules []Rule `json:"firewall_rules"` + } + err := (r.(RulePage)).ExtractInto(&s) + return s.Rules, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a firewall rule. +func (r commonResult) Extract() (*Rule, error) { + var s struct { + Rule *Rule `json:"firewall_rule"` + } + err := r.ExtractInto(&s) + return s.Rule, err +} + +// GetResult represents the result of a get operation. Call its Extract method +// to interpret it as a Rule. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Rule. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Rule. 
+type CreateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/testing/doc.go new file mode 100644 index 000000000000..df31e6c5c3be --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/testing/doc.go @@ -0,0 +1,2 @@ +// rules unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/testing/requests_test.go new file mode 100644 index 000000000000..2fedfa8ac777 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/testing/requests_test.go @@ -0,0 +1,381 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_rules": [ + { + "protocol": "tcp", + "description": "ssh rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "allow", + "ip_version": 4, + "shared": false + }, + { + "protocol": "udp", + "description": "udp rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": null, + "firewall_policy_id": "98d7fb51-698c-4123-87e8-f1eee6b5ab7e", + "position": 1, + "destination_port": null, + "id": "ab7bd950-6c56-4f5e-a307-45967078f890", + "name": "deny_all_udp", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "deny", + "ip_version": 4, + "shared": false + } + ] +} + `) + }) + + count := 0 + + rules.List(fake.ServiceClient(), rules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := rules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []rules.Rule{ + { + Protocol: "tcp", + Description: "ssh rule", + SourcePort: "", + SourceIPAddress: "", + DestinationIPAddress: "192.168.1.0/24", + PolicyID: "e2a5fb51-698c-4898-87e8-f1eee6b50919", + Position: 2, + DestinationPort: "22", + ID: "f03bd950-6c56-4f5e-a307-45967078f507", + Name: "ssh_form_any", + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Enabled: true, + Action: "allow", + IPVersion: 4, + Shared: false, + }, + { + Protocol: "udp", + Description: "udp rule", + SourcePort: "", + SourceIPAddress: "", + DestinationIPAddress: "", + PolicyID: "98d7fb51-698c-4123-87e8-f1eee6b5ab7e", + Position: 1, + 
DestinationPort: "", + ID: "ab7bd950-6c56-4f5e-a307-45967078f890", + Name: "deny_all_udp", + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Enabled: true, + Action: "deny", + IPVersion: 4, + Shared: false, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_rule": { + "protocol": "tcp", + "description": "ssh rule", + "destination_ip_address": "192.168.1.0/24", + "destination_port": "22", + "name": "ssh_form_any", + "action": "allow", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + options := rules.CreateOpts{ + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Protocol: rules.ProtocolTCP, + Description: "ssh rule", + DestinationIPAddress: "192.168.1.0/24", + DestinationPort: "22", + Name: "ssh_form_any", + Action: "allow", + } + + _, err := rules.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestCreateAnyProtocol(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_rule": { + "protocol": null, + "description": "any to 192.168.1.0/24", + "destination_ip_address": "192.168.1.0/24", + "name": "any_to_192.168.1.0/24", + "action": "allow", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": null, + "description": "any to 192.168.1.0/24", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": null, + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "any_to_192.168.1.0/24", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + options := rules.CreateOpts{ + TenantID: "80cf934d6ffb4ef5b244f1c512ad1e61", + Protocol: rules.ProtocolAny, + Description: "any to 192.168.1.0/24", + DestinationIPAddress: "192.168.1.0/24", + Name: "any_to_192.168.1.0/24", + Action: "allow", + } + + _, err := 
rules.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules/f03bd950-6c56-4f5e-a307-45967078f507", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "source_port": null, + "source_ip_address": null, + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": true, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + rule, err := rules.Get(fake.ServiceClient(), "f03bd950-6c56-4f5e-a307-45967078f507").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "tcp", rule.Protocol) + th.AssertEquals(t, "ssh rule", rule.Description) + th.AssertEquals(t, "192.168.1.0/24", rule.DestinationIPAddress) + th.AssertEquals(t, "e2a5fb51-698c-4898-87e8-f1eee6b50919", rule.PolicyID) + th.AssertEquals(t, 2, rule.Position) + th.AssertEquals(t, "22", rule.DestinationPort) + th.AssertEquals(t, "f03bd950-6c56-4f5e-a307-45967078f507", rule.ID) + th.AssertEquals(t, "ssh_form_any", rule.Name) + th.AssertEquals(t, "80cf934d6ffb4ef5b244f1c512ad1e61", rule.TenantID) + th.AssertEquals(t, true, rule.Enabled) + th.AssertEquals(t, "allow", rule.Action) + th.AssertEquals(t, 4, rule.IPVersion) + th.AssertEquals(t, false, rule.Shared) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules/f03bd950-6c56-4f5e-a307-45967078f507", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "destination_ip_address": "192.168.1.0/24", + "destination_port": "22", + "name": "ssh_form_any", + "action": "allow", + "enabled": false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "firewall_rule":{ + "protocol": "tcp", + "description": "ssh rule", + "destination_ip_address": "192.168.1.0/24", + "firewall_policy_id": "e2a5fb51-698c-4898-87e8-f1eee6b50919", + "position": 2, + "destination_port": "22", + "id": "f03bd950-6c56-4f5e-a307-45967078f507", + "name": "ssh_form_any", + "tenant_id": "80cf934d6ffb4ef5b244f1c512ad1e61", + "enabled": false, + "action": "allow", + "ip_version": 4, + "shared": false + } +} + `) + }) + + newProtocol := "tcp" + newDescription := "ssh rule" + newDestinationIP := "192.168.1.0/24" + newDestintionPort := "22" + newName := "ssh_form_any" + newAction := "allow" + + options := rules.UpdateOpts{ + Protocol: &newProtocol, + Description: &newDescription, + DestinationIPAddress: &newDestinationIP, + DestinationPort: &newDestintionPort, + Name: &newName, + Action: &newAction, + Enabled: gophercloud.Disabled, + } + + _, err := rules.Update(fake.ServiceClient(), "f03bd950-6c56-4f5e-a307-45967078f507", options).Extract() + th.AssertNoErr(t, err) 
+} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/fw/firewall_rules/4ec89077-d057-4a2b-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := rules.Delete(fake.ServiceClient(), "4ec89077-d057-4a2b-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go new file mode 100644 index 000000000000..79654be73e26 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/fwaas/rules/urls.go @@ -0,0 +1,16 @@ +package rules + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "fw" + resourcePath = "firewall_rules" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/doc.go new file mode 100644 index 000000000000..d533458267e8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/doc.go @@ -0,0 +1,5 @@ +// Package layer3 provides access to the Layer-3 networking extension for the +// OpenStack Neutron service. This extension allows API users to route packets +// between subnets, forward packets from internal networks to external ones, +// and access instances from external networks through floating IPs. +package layer3 diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go new file mode 100644 index 000000000000..bf5ec6807cc6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/doc.go @@ -0,0 +1,71 @@ +/* +package floatingips enables management and retrieval of Floating IPs from the +OpenStack Networking service. 
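+
+Floating IPs can only be created on networks whose `router:external' attribute
+is true. Association and disassociation are both performed through Update: set
+PortID to associate the floating IP with an internal port, or pass a nil PortID
+to disassociate it, as the examples below show.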
+ +Example to List Floating IPs + + listOpts := floatingips.ListOpts{ + FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee", + } + + allPages, err := floatingips.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allFIPs, err := floatingips.ExtractFloatingIPs(allPages) + if err != nil { + panic(err) + } + + for _, fip := range allFIPs { + fmt.Printf("%+v\n", fip) + } + +Example to Create a Floating IP + + createOpts := floatingips.CreateOpts{ + FloatingNetworkID: "a6917946-38ab-4ffd-a55a-26c0980ce5ee", + } + + fip, err := floatingips.Create(networkingClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Floating IP + + fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + portID := "76d0a61b-b8e5-490c-9892-4cf674f2bec8" + + updateOpts := floatingips.UpdateOpts{ + PortID: &portID, + } + + fip, err := floatingips.Update(networkingClient, fipID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Disassociate a Floating IP with a Port + + fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + + updateOpts := floatingips.UpdateOpts{ + PortID: nil, + } + + fip, err := floatingips.Update(networkingClient, fipID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Floating IP + + fipID := "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + err := floatingips.Delete(networkClient, fipID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package floatingips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go new file mode 100644 index 000000000000..d82a1bc8e294 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/requests.go @@ -0,0 +1,150 @@ +package floatingips + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + FloatingNetworkID string `q:"floating_network_id"` + PortID string `q:"port_id"` + FixedIP string `q:"fixed_ip_address"` + FloatingIP string `q:"floating_ip_address"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` + RouterID string `q:"router_id"` + Status string `q:"status"` +} + +// List returns a Pager which allows you to iterate over a collection of +// floating IP resources. It accepts a ListOpts struct, which allows you to +// filter and sort the returned collection for greater efficiency. 
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return FloatingIPPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToFloatingIPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new floating IP +// resource. The only required fields are FloatingNetworkID and PortID which +// refer to the external network and internal port respectively. +type CreateOpts struct { + FloatingNetworkID string `json:"floating_network_id" required:"true"` + FloatingIP string `json:"floating_ip_address,omitempty"` + PortID string `json:"port_id,omitempty"` + FixedIP string `json:"fixed_ip_address,omitempty"` + SubnetID string `json:"subnet_id,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` +} + +// ToFloatingIPCreateMap allows CreateOpts to satisfy the CreateOptsBuilder +// interface +func (opts CreateOpts) ToFloatingIPCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "floatingip") +} + +// Create accepts a CreateOpts struct and uses the values provided to create a +// new floating IP resource. You can create floating IPs on external networks +// only. If you provide a FloatingNetworkID which refers to a network that is +// not external (i.e. its `router:external' attribute is False), the operation +// will fail and return a 400 error. +// +// If you do not specify a FloatingIP address value, the operation will +// automatically allocate an available address for the new resource. If you do +// choose to specify one, it must fall within the subnet range for the external +// network - otherwise the operation returns a 400 error. If the FloatingIP +// address is already in use, the operation returns a 409 error code. +// +// You can associate the new resource with an internal port by using the PortID +// field. If you specify a PortID that is not valid, the operation will fail and +// return 404 error code. +// +// You must also configure an IP address for the port associated with the PortID +// you have provided - this is what the FixedIP refers to: an IP fixed to a +// port. Because a port might be associated with multiple IP addresses, you can +// use the FixedIP field to associate a particular IP address rather than have +// the API assume for you. If you specify an IP address that is not valid, the +// operation will fail and return a 400 error code. If the PortID and FixedIP +// are already associated with another resource, the operation will fail and +// returns a 409 error code. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToFloatingIPCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular floating IP resource based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
+type UpdateOptsBuilder interface {
+	ToFloatingIPUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a floating IP resource. The
+// only value that can be updated is which internal port the floating IP is
+// linked to. To associate the floating IP with a new internal port, provide its
+// ID. To disassociate the floating IP from all ports, set PortID to nil so that
+// a null port_id is sent.
+type UpdateOpts struct {
+	PortID *string `json:"port_id"`
+}
+
+// ToFloatingIPUpdateMap allows UpdateOpts to satisfy the UpdateOptsBuilder
+// interface
+func (opts UpdateOpts) ToFloatingIPUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "floatingip")
+}
+
+// Update allows floating IP resources to be updated. Currently, the only way to
+// "update" a floating IP is to associate it with a new internal port, or
+// disassociate it from all ports. See UpdateOpts for instructions on how to
+// do this.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToFloatingIPUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// Delete will permanently delete a particular floating IP resource. Please
+// ensure this is what you want - you can also disassociate the IP from existing
+// internal ports.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
new file mode 100644
index 000000000000..8b1a51764533
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/results.go
@@ -0,0 +1,119 @@
+package floatingips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// FloatingIP represents a floating IP resource. A floating IP is an external
+// IP address that is mapped to an internal port and, optionally, a specific
+// IP address on a private network. In other words, it enables access to an
+// instance on a private network from an external network. For this reason,
+// floating IPs can only be defined on networks where the `router:external'
+// attribute (provided by the external network extension) is set to True.
+type FloatingIP struct {
+	// ID is the unique identifier for the floating IP instance.
+	ID string `json:"id"`
+
+	// FloatingNetworkID is the UUID of the external network where the floating
+	// IP is to be created.
+	FloatingNetworkID string `json:"floating_network_id"`
+
+	// FloatingIP is the address of the floating IP on the external network.
+	FloatingIP string `json:"floating_ip_address"`
+
+	// PortID is the UUID of the port on an internal network that is associated
+	// with the floating IP.
+	PortID string `json:"port_id"`
+
+	// FixedIP is the specific IP address of the internal port which should be
+	// associated with the floating IP.
+	FixedIP string `json:"fixed_ip_address"`
+
+	// TenantID is the project owner of the floating IP. Only admin users can
+	// specify a project identifier other than its own.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the project owner of the floating IP.
+ ProjectID string `json:"project_id"` + + // Status is the condition of the API resource. + Status string `json:"status"` + + // RouterID is the ID of the router used for this floating IP. + RouterID string `json:"router_id"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will extract a FloatingIP resource from a result. +func (r commonResult) Extract() (*FloatingIP, error) { + var s struct { + FloatingIP *FloatingIP `json:"floatingip"` + } + err := r.ExtractInto(&s) + return s.FloatingIP, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a FloatingIP. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a FloatingIP. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a FloatingIP. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of an update operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// FloatingIPPage is the page returned by a pager when traversing over a +// collection of floating IPs. +type FloatingIPPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of floating IPs has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r FloatingIPPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"floatingips_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a FloatingIPPage struct is empty. +func (r FloatingIPPage) IsEmpty() (bool, error) { + is, err := ExtractFloatingIPs(r) + return len(is) == 0, err +} + +// ExtractFloatingIPs accepts a Page struct, specifically a FloatingIPPage +// struct, and extracts the elements into a slice of FloatingIP structs. In +// other words, a generic collection is mapped into a relevant slice. 
+func ExtractFloatingIPs(r pagination.Page) ([]FloatingIP, error) { + var s struct { + FloatingIPs []FloatingIP `json:"floatingips"` + } + err := (r.(FloatingIPPage)).ExtractInto(&s) + return s.FloatingIPs, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/testing/doc.go new file mode 100644 index 000000000000..82dfbe7fed3c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/testing/doc.go @@ -0,0 +1,2 @@ +// floatingips unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/testing/requests_test.go new file mode 100644 index 000000000000..89c028e8a720 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/testing/requests_test.go @@ -0,0 +1,415 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingips": [ + { + "floating_network_id": "6d67c30a-ddb4-49a1-bec3-a65b286b4170", + "router_id": null, + "fixed_ip_address": null, + "floating_ip_address": "192.0.0.4", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "status": "DOWN", + "port_id": null, + "id": "2f95fd2b-9f6a-4e8e-9e9a-2cbe286cbf9e", + "router_id": "1117c30a-ddb4-49a1-bec3-a65b286b4170" + }, + { + "floating_network_id": "90f742b1-6d17-487b-ba95-71881dbc0b64", + "router_id": "0a24cb83-faf5-4d7f-b723-3144ed8a2167", + "fixed_ip_address": "192.0.0.2", + "floating_ip_address": "10.0.0.3", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "status": "DOWN", + "port_id": "74a342ce-8e07-4e91-880c-9f834b68fa25", + "id": "ada25a95-f321-4f59-b0e0-f3a970dd3d63", + "router_id": "2227c30a-ddb4-49a1-bec3-a65b286b4170" + } + ] +} + `) + }) + + count := 0 + + floatingips.List(fake.ServiceClient(), floatingips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := floatingips.ExtractFloatingIPs(page) + if err != nil { + t.Errorf("Failed to extract floating IPs: %v", err) + return false, err + } + + expected := []floatingips.FloatingIP{ + { + FloatingNetworkID: "6d67c30a-ddb4-49a1-bec3-a65b286b4170", + FixedIP: "", + FloatingIP: "192.0.0.4", + TenantID: "017d8de156df4177889f31a9bd6edc00", + Status: "DOWN", + PortID: "", + ID: "2f95fd2b-9f6a-4e8e-9e9a-2cbe286cbf9e", + RouterID: "1117c30a-ddb4-49a1-bec3-a65b286b4170", + }, + { + FloatingNetworkID: "90f742b1-6d17-487b-ba95-71881dbc0b64", + FixedIP: "192.0.0.2", + FloatingIP: "10.0.0.3", + TenantID: "017d8de156df4177889f31a9bd6edc00", + Status: "DOWN", + PortID: "74a342ce-8e07-4e91-880c-9f834b68fa25", + ID: 
"ada25a95-f321-4f59-b0e0-f3a970dd3d63", + RouterID: "2227c30a-ddb4-49a1-bec3-a65b286b4170", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestInvalidNextPageURLs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, `{"floatingips": [{}], "floatingips_links": {}}`) + }) + + floatingips.List(fake.ServiceClient(), floatingips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + floatingips.ExtractFloatingIPs(page) + return true, nil + }) +} + +func TestRequiredFieldsForCreate(t *testing.T) { + res1 := floatingips.Create(fake.ServiceClient(), floatingips.CreateOpts{FloatingNetworkID: ""}) + if res1.Err == nil { + t.Fatalf("Expected error, got none") + } + + res2 := floatingips.Create(fake.ServiceClient(), floatingips.CreateOpts{FloatingNetworkID: "foo", PortID: ""}) + if res2.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": "10.0.0.3", + "floating_ip_address": "", + "port_id": "ce705c24-c1ef-408a-bda3-7bbd946164ab", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + options := floatingips.CreateOpts{ + FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57", + PortID: "ce705c24-c1ef-408a-bda3-7bbd946164ab", + } + + ip, err := floatingips.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) + th.AssertEquals(t, "4969c491a3c74ee4af974e6d800c62de", ip.TenantID) + th.AssertEquals(t, "376da547-b977-4cfe-9cba-275c80debf57", ip.FloatingNetworkID) + th.AssertEquals(t, "", ip.FloatingIP) + th.AssertEquals(t, "ce705c24-c1ef-408a-bda3-7bbd946164ab", ip.PortID) + th.AssertEquals(t, "10.0.0.3", ip.FixedIP) +} + +func TestCreateEmptyPort(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "floatingip": { + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` + { + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": 
"4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": "10.0.0.3", + "floating_ip_address": "", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } + } + `) + }) + + options := floatingips.CreateOpts{ + FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57", + } + + ip, err := floatingips.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) + th.AssertEquals(t, "4969c491a3c74ee4af974e6d800c62de", ip.TenantID) + th.AssertEquals(t, "376da547-b977-4cfe-9cba-275c80debf57", ip.FloatingNetworkID) + th.AssertEquals(t, "", ip.FloatingIP) + th.AssertEquals(t, "", ip.PortID) + th.AssertEquals(t, "10.0.0.3", ip.FixedIP) +} + +func TestCreateWithSubnetID(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "subnet_id": "37adf01c-24db-467a-b845-7ab1e8216c01" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": null, + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": null, + "floating_ip_address": "172.24.4.3", + "port_id": null, + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + options := floatingips.CreateOpts{ + FloatingNetworkID: "376da547-b977-4cfe-9cba-275c80debf57", + SubnetID: "37adf01c-24db-467a-b845-7ab1e8216c01", + } + + ip, err := floatingips.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) + th.AssertEquals(t, "4969c491a3c74ee4af974e6d800c62de", ip.TenantID) + th.AssertEquals(t, "376da547-b977-4cfe-9cba-275c80debf57", ip.FloatingNetworkID) + th.AssertEquals(t, "172.24.4.3", ip.FloatingIP) + th.AssertEquals(t, "", ip.PortID) + th.AssertEquals(t, "", ip.FixedIP) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "floating_network_id": "90f742b1-6d17-487b-ba95-71881dbc0b64", + "fixed_ip_address": "192.0.0.2", + "floating_ip_address": "10.0.0.3", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "status": "DOWN", + "port_id": "74a342ce-8e07-4e91-880c-9f834b68fa25", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7", + "router_id": "1117c30a-ddb4-49a1-bec3-a65b286b4170" + } +} + `) + }) + + ip, err := floatingips.Get(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "90f742b1-6d17-487b-ba95-71881dbc0b64", ip.FloatingNetworkID) + th.AssertEquals(t, "10.0.0.3", ip.FloatingIP) + th.AssertEquals(t, "74a342ce-8e07-4e91-880c-9f834b68fa25", ip.PortID) + th.AssertEquals(t, "192.0.0.2", ip.FixedIP) + th.AssertEquals(t, 
"017d8de156df4177889f31a9bd6edc00", ip.TenantID) + th.AssertEquals(t, "DOWN", ip.Status) + th.AssertEquals(t, "2f245a7b-796b-4f26-9cf9-9e82d248fda7", ip.ID) + th.AssertEquals(t, "1117c30a-ddb4-49a1-bec3-a65b286b4170", ip.RouterID) +} + +func TestAssociate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "port_id": "423abc8d-2991-4a55-ba98-2aaea84cc72e" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": null, + "floating_ip_address": "172.24.4.228", + "port_id": "423abc8d-2991-4a55-ba98-2aaea84cc72e", + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + portID := "423abc8d-2991-4a55-ba98-2aaea84cc72e" + ip, err := floatingips.Update(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7", floatingips.UpdateOpts{PortID: &portID}).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, portID, ip.PortID) +} + +func TestDisassociate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "floatingip": { + "port_id": null + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "floatingip": { + "router_id": "d23abc8d-2991-4a55-ba98-2aaea84cc72f", + "tenant_id": "4969c491a3c74ee4af974e6d800c62de", + "floating_network_id": "376da547-b977-4cfe-9cba-275c80debf57", + "fixed_ip_address": null, + "floating_ip_address": "172.24.4.228", + "port_id": null, + "id": "2f245a7b-796b-4f26-9cf9-9e82d248fda7" + } +} + `) + }) + + ip, err := floatingips.Update(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7", floatingips.UpdateOpts{PortID: nil}).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, "", ip.FixedIP) + th.AssertDeepEquals(t, "", ip.PortID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/floatingips/2f245a7b-796b-4f26-9cf9-9e82d248fda7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := floatingips.Delete(fake.ServiceClient(), "2f245a7b-796b-4f26-9cf9-9e82d248fda7") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go new file mode 100644 index 000000000000..1318a184caaa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/floatingips/urls.go @@ 
-0,0 +1,13 @@ +package floatingips + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "floatingips" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go new file mode 100644 index 000000000000..6ede7f5e1713 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/doc.go @@ -0,0 +1,108 @@ +/* +Package routers enables management and retrieval of Routers from the OpenStack +Networking service. + +Example to List Routers + +	listOpts := routers.ListOpts{} +	allPages, err := routers.List(networkClient, listOpts).AllPages() +	if err != nil { +		panic(err) +	} + +	allRouters, err := routers.ExtractRouters(allPages) +	if err != nil { +		panic(err) +	} + +	for _, router := range allRouters { +		fmt.Printf("%+v\n", router) +	} + +Example to Create a Router + +	iTrue := true +	gwi := routers.GatewayInfo{ +		NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b", +	} + +	createOpts := routers.CreateOpts{ +		Name:         "router_1", +		AdminStateUp: &iTrue, +		GatewayInfo:  &gwi, +	} + +	router, err := routers.Create(networkClient, createOpts).Extract() +	if err != nil { +		panic(err) +	} + +Example to Update a Router + +	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + +	routes := []routers.Route{{ +		DestinationCIDR: "40.0.1.0/24", +		NextHop:         "10.1.0.10", +	}} + +	updateOpts := routers.UpdateOpts{ +		Name:   "new_name", +		Routes: routes, +	} + +	router, err := routers.Update(networkClient, routerID, updateOpts).Extract() +	if err != nil { +		panic(err) +	} + +Example to Remove all Routes from a Router + +	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + +	routes := []routers.Route{} + +	updateOpts := routers.UpdateOpts{ +		Routes: routes, +	} + +	router, err := routers.Update(networkClient, routerID, updateOpts).Extract() +	if err != nil { +		panic(err) +	} + +Example to Delete a Router + +	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" +	err := routers.Delete(networkClient, routerID).ExtractErr() +	if err != nil { +		panic(err) +	} + +Example to Add an Interface to a Router + +	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + +	intOpts := routers.AddInterfaceOpts{ +		SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1", +	} + +	intf, err := routers.AddInterface(networkClient, routerID, intOpts).Extract() +	if err != nil { +		panic(err) +	} + +Example to Remove an Interface from a Router + +	routerID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + +	intOpts := routers.RemoveInterfaceOpts{ +		SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1", +	} + +	intf, err := routers.RemoveInterface(networkClient, routerID, intOpts).Extract() +	if err != nil { +		panic(err) +	} +*/ +package routers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go new file mode 100644 index 000000000000..8b2bde530e9d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/requests.go @@ -0,0 +1,226 @@ +package routers + +import ( + "github.com/gophercloud/gophercloud" +
"github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + Distributed *bool `q:"distributed"` + Status string `q:"status"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// routers. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those routers that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return RouterPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToRouterCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new router. There are +// no required values. +type CreateOpts struct { + Name string `json:"name,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Distributed *bool `json:"distributed,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` + AvailabilityZoneHints []string `json:"availability_zone_hints,omitempty"` +} + +// ToRouterCreateMap builds a create request body from CreateOpts. +func (opts CreateOpts) ToRouterCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "router") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// logical router. When it is created, the router does not have an internal +// interface - it is not associated to any subnet. +// +// You can optionally specify an external gateway for a router using the +// GatewayInfo struct. The external gateway for the router must be plugged into +// an external network (it is external if its `router:external' field is set to +// true). +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRouterCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular router based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
+type UpdateOptsBuilder interface { + ToRouterUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a router. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Distributed *bool `json:"distributed,omitempty"` + GatewayInfo *GatewayInfo `json:"external_gateway_info,omitempty"` + Routes []Route `json:"routes"` +} + +// ToRouterUpdateMap builds an update body based on UpdateOpts. +func (opts UpdateOpts) ToRouterUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "router") +} + +// Update allows routers to be updated. You can update the name, administrative +// state, and the external gateway. For more information about how to set the +// external gateway for a router, see Create. This operation does not enable +// the update of router interfaces. To do this, use the AddInterface and +// RemoveInterface functions. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRouterUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular router based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// AddInterfaceOptsBuilder allows extensions to add additional parameters to +// the AddInterface request. +type AddInterfaceOptsBuilder interface { + ToRouterAddInterfaceMap() (map[string]interface{}, error) +} + +// AddInterfaceOpts represents the options for adding an interface to a router. +type AddInterfaceOpts struct { + SubnetID string `json:"subnet_id,omitempty" xor:"PortID"` + PortID string `json:"port_id,omitempty" xor:"SubnetID"` +} + +// ToRouterAddInterfaceMap builds a request body from AddInterfaceOpts. +func (opts AddInterfaceOpts) ToRouterAddInterfaceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// AddInterface attaches a subnet to an internal router interface. You must +// specify either a SubnetID or PortID in the request body. If you specify both, +// the operation will fail and an error will be returned. +// +// If you specify a SubnetID, the gateway IP address for that particular subnet +// is used to create the router interface. Alternatively, if you specify a +// PortID, the IP address associated with the port is used to create the router +// interface. +// +// If you reference a port that is associated with multiple IP addresses, or +// if the port is associated with zero IP addresses, the operation will fail and +// a 400 Bad Request error will be returned. +// +// If you reference a port already in use, the operation will fail and a 409 +// Conflict error will be returned. +// +// The PortID that is returned after using Extract() on the result of this +// operation can either be the same PortID passed in or, on the other hand, the +// identifier of a new port created by this operation. After the operation +// completes, the device ID of the port is set to the router ID, and the +// device owner attribute is set to `network:router_interface'. 
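+// +// A minimal usage sketch (illustrative; it assumes networkClient is an +// authenticated Networking v2 *gophercloud.ServiceClient and routerID +// identifies an existing router): +// +//	opts := routers.AddInterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"} +//	info, err := routers.AddInterface(networkClient, routerID, opts).Extract() +//	if err != nil { +//		panic(err) +//	} +//	fmt.Printf("router interface uses port %s\n", info.PortID)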
+func AddInterface(c *gophercloud.ServiceClient, id string, opts AddInterfaceOptsBuilder) (r InterfaceResult) { + b, err := opts.ToRouterAddInterfaceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(addInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveInterfaceOptsBuilder allows extensions to add additional parameters to +// the RemoveInterface request. +type RemoveInterfaceOptsBuilder interface { + ToRouterRemoveInterfaceMap() (map[string]interface{}, error) +} + +// RemoveInterfaceOpts represents options for removing an interface from +// a router. +type RemoveInterfaceOpts struct { + SubnetID string `json:"subnet_id,omitempty" or:"PortID"` + PortID string `json:"port_id,omitempty" or:"SubnetID"` +} + +// ToRouterRemoveInterfaceMap builds a request body based on +// RemoveInterfaceOpts. +func (opts RemoveInterfaceOpts) ToRouterRemoveInterfaceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// RemoveInterface removes an internal router interface, which detaches a +// subnet from the router. You must specify either a SubnetID or PortID, since +// these values are used to identify the router interface to remove. +// +// Unlike AddInterface, you can also specify both a SubnetID and PortID. If you +// choose to specify both, the subnet ID must correspond to the subnet ID of +// the first IP address on the port specified by the port ID. Otherwise, the +// operation will fail and return a 409 Conflict error. +// +// If the router, subnet or port which are referenced do not exist or are not +// visible to you, the operation will fail and a 404 Not Found error will be +// returned. After this operation completes, the port connecting the router +// with the subnet is removed from the subnet for the network. +func RemoveInterface(c *gophercloud.ServiceClient, id string, opts RemoveInterfaceOptsBuilder) (r InterfaceResult) { + b, err := opts.ToRouterRemoveInterfaceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(removeInterfaceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go new file mode 100644 index 000000000000..dffdce8f48f1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/results.go @@ -0,0 +1,175 @@ +package routers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// GatewayInfo represents the information of an external gateway for any +// particular network router. +type GatewayInfo struct { + NetworkID string `json:"network_id"` + EnableSNAT *bool `json:"enable_snat,omitempty"` + ExternalFixedIPs []ExternalFixedIP `json:"external_fixed_ips,omitempty"` +} + +// ExternalFixedIP is the IP address and subnet ID of the external gateway of a +// router. +type ExternalFixedIP struct { + IPAddress string `json:"ip_address"` + SubnetID string `json:"subnet_id"` +} + +// Route is a possible route in a router. +type Route struct { + NextHop string `json:"nexthop"` + DestinationCIDR string `json:"destination"` +} + +// Router represents a Neutron router. 
A router is a logical entity that +// forwards packets across internal subnets and NATs (network address +// translation) them on external networks through an appropriate gateway. +// +// A router has an interface for each subnet with which it is associated. By +// default, the IP address of such interface is the subnet's gateway IP. Also, +// whenever a router is associated with a subnet, a port for that router +// interface is added to the subnet's network. +type Router struct { + // Status indicates whether or not a router is currently operational. + Status string `json:"status"` + + // GatewayInfo provides information on the external gateway for the router. + GatewayInfo GatewayInfo `json:"external_gateway_info"` + + // AdminStateUp is the administrative state of the router. + AdminStateUp bool `json:"admin_state_up"` + + // Distributed is whether the router is distributed or not. + Distributed bool `json:"distributed"` + + // Name is the human readable name for the router. It does not have to be + // unique. + Name string `json:"name"` + + // ID is the unique identifier for the router. + ID string `json:"id"` + + // TenantID is the project owner of the router. Only admin users can + // specify a project identifier other than its own. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the router. + ProjectID string `json:"project_id"` + + // Routes are a collection of static routes that the router will host. + Routes []Route `json:"routes"` + + // AvailabilityZoneHints groups network nodes that run services like DHCP, L3, FW, and others. + // It is used to make network resources highly available. + AvailabilityZoneHints []string `json:"availability_zone_hints"` +} + +// RouterPage is the page returned by a pager when traversing over a +// collection of routers. +type RouterPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of routers has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r RouterPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"routers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a RouterPage struct is empty. +func (r RouterPage) IsEmpty() (bool, error) { + is, err := ExtractRouters(r) + return len(is) == 0, err +} + +// ExtractRouters accepts a Page struct, specifically a RouterPage struct, +// and extracts the elements into a slice of Router structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRouters(r pagination.Page) ([]Router, error) { + var s struct { + Routers []Router `json:"routers"` + } + err := (r.(RouterPage)).ExtractInto(&s) + return s.Routers, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a router. +func (r commonResult) Extract() (*Router, error) { + var s struct { + Router *Router `json:"router"` + } + err := r.ExtractInto(&s) + return s.Router, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Router. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Router. 
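+// +// For example (an illustrative sketch; networkClient is assumed to be an +// authenticated Networking v2 *gophercloud.ServiceClient): +// +//	r, err := routers.Get(networkClient, "4e8e5957-649f-477b-9e5b-f1f75b21c03c").Extract() +//	if err != nil { +//		panic(err) +//	} +//	fmt.Println(r.Name, r.Status)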
+type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Router. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its ExtractErr +// method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// InterfaceInfo represents information about a particular router interface. As +// mentioned above, in order for a router to forward to a subnet, it needs an +// interface. +type InterfaceInfo struct { + // SubnetID is the ID of the subnet which this interface is associated with. + SubnetID string `json:"subnet_id"` + + // PortID is the ID of the port that is a part of the subnet. + PortID string `json:"port_id"` + + // ID is the UUID of the interface. + ID string `json:"id"` + + // TenantID is the owner of the interface. + TenantID string `json:"tenant_id"` +} + +// InterfaceResult represents the result of interface operations, such as +// AddInterface() and RemoveInterface(). Call its Extract method to interpret +// the result as a InterfaceInfo. +type InterfaceResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts an information struct. +func (r InterfaceResult) Extract() (*InterfaceInfo, error) { + var s InterfaceInfo + err := r.ExtractInto(&s) + return &s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/testing/doc.go new file mode 100644 index 000000000000..4bfd0b5f217d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/testing/doc.go @@ -0,0 +1,2 @@ +// routers unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/testing/requests_test.go new file mode 100644 index 000000000000..2f7b93d4b92a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/testing/requests_test.go @@ -0,0 +1,471 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "routers": [ + { + "status": "ACTIVE", + "external_gateway_info": null, + "name": "second_routers", + "admin_state_up": true, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "distributed": false, + "id": "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b" + }, + { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8" + }, + "name": "router1", + "admin_state_up": true, + "tenant_id": "33a40233088643acb66ff6eb0ebea679", + "distributed": false, + 
"id": "a9254bdb-2613-4a13-ac4c-adc581fba50d" + }, + { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "2b37576e-b050-4891-8b20-e1e37a93942a", + "external_fixed_ips": [ + {"ip_address": "192.0.2.17", "subnet_id": "ab561bc4-1a8e-48f2-9fbd-376fcb1a1def"}, + {"ip_address": "198.51.100.33", "subnet_id": "1d699529-bdfd-43f8-bcaa-bff00c547af2"} + ] + }, + "name": "gateway", + "admin_state_up": true, + "tenant_id": "a3e881e0a6534880c5473d95b9442099", + "distributed": false, + "id": "308a035c-005d-4452-a9fe-6f8f2f0c28d8" + } + ] +} + `) + }) + + count := 0 + + routers.List(fake.ServiceClient(), routers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := routers.ExtractRouters(page) + if err != nil { + t.Errorf("Failed to extract routers: %v", err) + return false, err + } + + expected := []routers.Router{ + { + Status: "ACTIVE", + GatewayInfo: routers.GatewayInfo{NetworkID: ""}, + AdminStateUp: true, + Distributed: false, + Name: "second_routers", + ID: "7177abc4-5ae9-4bb7-b0d4-89e94a4abf3b", + TenantID: "6b96ff0cb17a4b859e1e575d221683d3", + }, + { + Status: "ACTIVE", + GatewayInfo: routers.GatewayInfo{NetworkID: "3c5bcddd-6af9-4e6b-9c3e-c153e521cab8"}, + AdminStateUp: true, + Distributed: false, + Name: "router1", + ID: "a9254bdb-2613-4a13-ac4c-adc581fba50d", + TenantID: "33a40233088643acb66ff6eb0ebea679", + }, + { + Status: "ACTIVE", + GatewayInfo: routers.GatewayInfo{ + NetworkID: "2b37576e-b050-4891-8b20-e1e37a93942a", + ExternalFixedIPs: []routers.ExternalFixedIP{ + {IPAddress: "192.0.2.17", SubnetID: "ab561bc4-1a8e-48f2-9fbd-376fcb1a1def"}, + {IPAddress: "198.51.100.33", SubnetID: "1d699529-bdfd-43f8-bcaa-bff00c547af2"}, + }, + }, + AdminStateUp: true, + Distributed: false, + Name: "gateway", + ID: "308a035c-005d-4452-a9fe-6f8f2f0c28d8", + TenantID: "a3e881e0a6534880c5473d95b9442099", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "router":{ + "name": "foo_router", + "admin_state_up": false, + "external_gateway_info":{ + "enable_snat": false, + "network_id":"8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "availability_zone_hints": ["zone1", "zone2"] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", + "enable_snat": false, + "external_fixed_ips": [ + {"ip_address": "192.0.2.17", "subnet_id": "ab561bc4-1a8e-48f2-9fbd-376fcb1a1def"} + ] + }, + "name": "foo_router", + "admin_state_up": false, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "distributed": false, + "availability_zone_hints": ["zone1", "zone2"], + "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e" + } +} + `) + }) + + asu := false + enableSNAT := false + gwi := routers.GatewayInfo{ + NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b", + EnableSNAT: &enableSNAT, + } + options := routers.CreateOpts{ + Name: "foo_router", + AdminStateUp: &asu, + GatewayInfo: &gwi, + AvailabilityZoneHints: 
[]string{"zone1", "zone2"}, + } + r, err := routers.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + gwi.ExternalFixedIPs = []routers.ExternalFixedIP{{ + IPAddress: "192.0.2.17", + SubnetID: "ab561bc4-1a8e-48f2-9fbd-376fcb1a1def", + }} + + th.AssertEquals(t, "foo_router", r.Name) + th.AssertEquals(t, false, r.AdminStateUp) + th.AssertDeepEquals(t, gwi, r.GatewayInfo) + th.AssertDeepEquals(t, []string{"zone1", "zone2"}, r.AvailabilityZoneHints) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/a07eea83-7710-4860-931b-5fe220fae533", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "85d76829-6415-48ff-9c63-5c5ca8c61ac6", + "external_fixed_ips": [ + {"ip_address": "198.51.100.33", "subnet_id": "1d699529-bdfd-43f8-bcaa-bff00c547af2"} + ] + }, + "routes": [ + { + "nexthop": "10.1.0.10", + "destination": "40.0.1.0/24" + } + ], + "name": "router1", + "admin_state_up": true, + "tenant_id": "d6554fe62e2f41efbb6e026fad5c1542", + "distributed": false, + "availability_zone_hints": ["zone1", "zone2"], + "id": "a07eea83-7710-4860-931b-5fe220fae533" + } +} + `) + }) + + n, err := routers.Get(fake.ServiceClient(), "a07eea83-7710-4860-931b-5fe220fae533").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, n.GatewayInfo, routers.GatewayInfo{ + NetworkID: "85d76829-6415-48ff-9c63-5c5ca8c61ac6", + ExternalFixedIPs: []routers.ExternalFixedIP{ + {IPAddress: "198.51.100.33", SubnetID: "1d699529-bdfd-43f8-bcaa-bff00c547af2"}, + }, + }) + th.AssertEquals(t, n.Name, "router1") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.TenantID, "d6554fe62e2f41efbb6e026fad5c1542") + th.AssertEquals(t, n.ID, "a07eea83-7710-4860-931b-5fe220fae533") + th.AssertDeepEquals(t, n.Routes, []routers.Route{{DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.0.10"}}) + th.AssertDeepEquals(t, n.AvailabilityZoneHints, []string{"zone1", "zone2"}) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "router": { + "name": "new_name", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "routes": [ + { + "nexthop": "10.1.0.10", + "destination": "40.0.1.0/24" + } + ] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", + "external_fixed_ips": [ + {"ip_address": "192.0.2.17", "subnet_id": "ab561bc4-1a8e-48f2-9fbd-376fcb1a1def"} + ] + }, + "name": "new_name", + "admin_state_up": true, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "distributed": false, + "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e", + "routes": [ + { + "nexthop": "10.1.0.10", + "destination": "40.0.1.0/24" + } + ] + } +} + `) + }) + + gwi := 
routers.GatewayInfo{NetworkID: "8ca37218-28ff-41cb-9b10-039601ea7e6b"} + r := []routers.Route{{DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.0.10"}} + options := routers.UpdateOpts{Name: "new_name", GatewayInfo: &gwi, Routes: r} + + n, err := routers.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + gwi.ExternalFixedIPs = []routers.ExternalFixedIP{ + {IPAddress: "192.0.2.17", SubnetID: "ab561bc4-1a8e-48f2-9fbd-376fcb1a1def"}, + } + + th.AssertEquals(t, n.Name, "new_name") + th.AssertDeepEquals(t, n.GatewayInfo, gwi) + th.AssertDeepEquals(t, n.Routes, []routers.Route{{DestinationCIDR: "40.0.1.0/24", NextHop: "10.1.0.10"}}) +} + +func TestAllRoutesRemoved(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "router": { + "routes": [] + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "router": { + "status": "ACTIVE", + "external_gateway_info": { + "network_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b" + }, + "name": "name", + "admin_state_up": true, + "tenant_id": "6b96ff0cb17a4b859e1e575d221683d3", + "distributed": false, + "id": "8604a0de-7f6b-409a-a47c-a1cc7bc77b2e", + "routes": [] + } +} + `) + }) + + r := []routers.Route{} + options := routers.UpdateOpts{Routes: r} + + n, err := routers.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, n.Routes, []routers.Route{}) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := routers.Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertNoErr(t, res.Err) +} + +func TestAddInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c/add_router_interface", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1" +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnet_id": "0d32a837-8069-4ec3-84c4-3eef3e10b188", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "port_id": "3f990102-4485-4df1-97a0-2c35bdb85b31", + "id": "9a83fa11-8da5-436e-9afe-3d3ac5ce7770" +} +`) + }) + + opts := routers.AddInterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"} + res, err := routers.AddInterface(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "0d32a837-8069-4ec3-84c4-3eef3e10b188", res.SubnetID) + th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", res.TenantID) + 
th.AssertEquals(t, "3f990102-4485-4df1-97a0-2c35bdb85b31", res.PortID) + th.AssertEquals(t, "9a83fa11-8da5-436e-9afe-3d3ac5ce7770", res.ID) +} + +func TestAddInterfaceRequiredOpts(t *testing.T) { + _, err := routers.AddInterface(fake.ServiceClient(), "foo", routers.AddInterfaceOpts{}).Extract() + if err == nil { + t.Fatalf("Expected error, got none") + } + _, err = routers.AddInterface(fake.ServiceClient(), "foo", routers.AddInterfaceOpts{SubnetID: "bar", PortID: "baz"}).Extract() + if err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestRemoveInterface(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/routers/4e8e5957-649f-477b-9e5b-f1f75b21c03c/remove_router_interface", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "subnet_id": "a2f1f29d-571b-4533-907f-5803ab96ead1" +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "subnet_id": "0d32a837-8069-4ec3-84c4-3eef3e10b188", + "tenant_id": "017d8de156df4177889f31a9bd6edc00", + "port_id": "3f990102-4485-4df1-97a0-2c35bdb85b31", + "id": "9a83fa11-8da5-436e-9afe-3d3ac5ce7770" +} +`) + }) + + opts := routers.RemoveInterfaceOpts{SubnetID: "a2f1f29d-571b-4533-907f-5803ab96ead1"} + res, err := routers.RemoveInterface(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "0d32a837-8069-4ec3-84c4-3eef3e10b188", res.SubnetID) + th.AssertEquals(t, "017d8de156df4177889f31a9bd6edc00", res.TenantID) + th.AssertEquals(t, "3f990102-4485-4df1-97a0-2c35bdb85b31", res.PortID) + th.AssertEquals(t, "9a83fa11-8da5-436e-9afe-3d3ac5ce7770", res.ID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go new file mode 100644 index 000000000000..f9e9da321174 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/layer3/routers/urls.go @@ -0,0 +1,21 @@ +package routers + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "routers" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func addInterfaceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id, "add_router_interface") +} + +func removeInterfaceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id, "remove_router_interface") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go new file mode 100644 index 000000000000..bc1fc282f4ee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/doc.go @@ -0,0 +1,3 @@ +// Package lbaas provides information and interaction with the Load Balancer +// as a Service extension for the OpenStack Networking service. 
+package lbaas diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go new file mode 100644 index 000000000000..bad3324b5ed0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/doc.go @@ -0,0 +1,59 @@ +/* +Package members provides information and interaction with Members of the +Load Balancer as a Service extension for the OpenStack Networking service. + +Example to List Members + + listOpts := members.ListOpts{ + ProtocolPort: 80, + } + + allPages, err := members.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allMembers, err := members.ExtractMembers(allPages) + if err != nil { + panic(err) + } + + for _, member := range allMembers { + fmt.Printf("%+v\n", member) + } + +Example to Create a Member + + createOpts := members.CreateOpts{ + Address: "192.168.2.14", + ProtocolPort: 80, + PoolID: "0b266a12-0fdf-4434-bd11-649d84e54bd5" + } + + member, err := members.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Member + + memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf" + + updateOpts := members.UpdateOpts{ + AdminStateUp: gophercloud.Disabled, + } + + member, err := members.Update(networkClient, memberID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Member + + memberID := "46592c54-03f7-40ef-9cdf-b1fcf2775ddf" + err := members.Delete(networkClient, memberID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package members diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go new file mode 100644 index 000000000000..1a3128844405 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/requests.go @@ -0,0 +1,124 @@ +package members + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Weight int `q:"weight"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + PoolID string `q:"pool_id"` + Address string `q:"address"` + ProtocolPort int `q:"protocol_port"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// members. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those members that are owned by the +// tenant who submits the request, unless an admin user submits the request. 
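+// +// A minimal pagination sketch (illustrative; networkClient is assumed to be an +// authenticated Networking v2 *gophercloud.ServiceClient and poolID an +// existing pool ID): +// +//	pager := members.List(networkClient, members.ListOpts{PoolID: poolID}) +//	err := pager.EachPage(func(page pagination.Page) (bool, error) { +//		ms, err := members.ExtractMembers(page) +//		if err != nil { +//			return false, err +//		} +//		for _, m := range ms { +//			fmt.Println(m.Address, m.ProtocolPort) +//		} +//		return true, nil +//	})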
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return MemberPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLBMemberCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new pool member. +type CreateOpts struct { + // Address is the IP address of the member. + Address string `json:"address" required:"true"` + + // ProtocolPort is the port on which the application is hosted. + ProtocolPort int `json:"protocol_port" required:"true"` + + // PoolID is the pool to which this member will belong. + PoolID string `json:"pool_id" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` +} + +// ToLBMemberCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToLBMemberCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "member") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// load balancer pool member. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLBMemberCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool member based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLBMemberUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a pool member. +type UpdateOpts struct { + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLBMemberUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToLBMemberUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "member") +} + +// Update allows members to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBMemberUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201, 202}, + }) + return +} + +// Delete will permanently delete a particular member based on its unique ID. 
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go new file mode 100644 index 000000000000..804dbe8445f8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/results.go @@ -0,0 +1,109 @@ +package members + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Member represents the application running on a backend server. +type Member struct { + // Status is the status of the member. Indicates whether the member + // is operational. + Status string + + // Weight is the weight of member. + Weight int + + // AdminStateUp is the administrative state of the member, which is up + // (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // TenantID is the owner of the member. + TenantID string `json:"tenant_id"` + + // PoolID is the pool to which the member belongs. + PoolID string `json:"pool_id"` + + // Address is the IP address of the member. + Address string + + // ProtocolPort is the port on which the application is hosted. + ProtocolPort int `json:"protocol_port"` + + // ID is the unique ID for the member. + ID string +} + +// MemberPage is the page returned by a pager when traversing over a +// collection of pool members. +type MemberPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of members has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MemberPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"members_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MemberPage struct is empty. +func (r MemberPage) IsEmpty() (bool, error) { + is, err := ExtractMembers(r) + return len(is) == 0, err +} + +// ExtractMembers accepts a Page struct, specifically a MemberPage struct, +// and extracts the elements into a slice of Member structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMembers(r pagination.Page) ([]Member, error) { + var s struct { + Members []Member `json:"members"` + } + err := (r.(MemberPage)).ExtractInto(&s) + return s.Members, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a member. +func (r commonResult) Extract() (*Member, error) { + var s struct { + Member *Member `json:"member"` + } + err := r.ExtractInto(&s) + return s.Member, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Member. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Member. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Member. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. 
Call its +// ExtractErr method to determine if the result succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/testing/doc.go new file mode 100644 index 000000000000..1afbc434f603 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/testing/doc.go @@ -0,0 +1,2 @@ +// members unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/testing/requests_test.go new file mode 100644 index 000000000000..3e4f1d43f6ba --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/testing/requests_test.go @@ -0,0 +1,238 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "members":[ + { + "status":"ACTIVE", + "weight":1, + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "pool_id":"72741b06-df4d-4715-b142-276b6bce75ab", + "address":"10.0.0.4", + "protocol_port":80, + "id":"701b531b-111a-4f21-ad85-4795b7b12af6" + }, + { + "status":"ACTIVE", + "weight":1, + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "pool_id":"72741b06-df4d-4715-b142-276b6bce75ab", + "address":"10.0.0.3", + "protocol_port":80, + "id":"beb53b4d-230b-4abd-8118-575b8fa006ef" + } + ] +} + `) + }) + + count := 0 + + members.List(fake.ServiceClient(), members.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := members.ExtractMembers(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []members.Member{ + { + Status: "ACTIVE", + Weight: 1, + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + PoolID: "72741b06-df4d-4715-b142-276b6bce75ab", + Address: "10.0.0.4", + ProtocolPort: 80, + ID: "701b531b-111a-4f21-ad85-4795b7b12af6", + }, + { + Status: "ACTIVE", + Weight: 1, + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + PoolID: "72741b06-df4d-4715-b142-276b6bce75ab", + Address: "10.0.0.3", + ProtocolPort: 80, + ID: "beb53b4d-230b-4abd-8118-575b8fa006ef", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + 
th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "member": { + "tenant_id": "453105b9-1754-413f-aab1-55f1af620750", + "pool_id": "foo", + "address": "192.0.2.14", + "protocol_port":8080 + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "member": { + "id": "975592ca-e308-48ad-8298-731935ee9f45", + "address": "192.0.2.14", + "protocol_port": 8080, + "tenant_id": "453105b9-1754-413f-aab1-55f1af620750", + "admin_state_up":true, + "weight": 1, + "status": "DOWN" + } +} + `) + }) + + options := members.CreateOpts{ + TenantID: "453105b9-1754-413f-aab1-55f1af620750", + Address: "192.0.2.14", + ProtocolPort: 8080, + PoolID: "foo", + } + _, err := members.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members/975592ca-e308-48ad-8298-731935ee9f45", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "member":{ + "id":"975592ca-e308-48ad-8298-731935ee9f45", + "address":"192.0.2.14", + "protocol_port":8080, + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "admin_state_up":true, + "weight":1, + "status":"DOWN" + } +} + `) + }) + + m, err := members.Get(fake.ServiceClient(), "975592ca-e308-48ad-8298-731935ee9f45").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "975592ca-e308-48ad-8298-731935ee9f45", m.ID) + th.AssertEquals(t, "192.0.2.14", m.Address) + th.AssertEquals(t, 8080, m.ProtocolPort) + th.AssertEquals(t, "453105b9-1754-413f-aab1-55f1af620750", m.TenantID) + th.AssertEquals(t, true, m.AdminStateUp) + th.AssertEquals(t, 1, m.Weight) + th.AssertEquals(t, "DOWN", m.Status) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "member":{ + "admin_state_up":false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "member":{ + "status":"PENDING_UPDATE", + "protocol_port":8080, + "weight":1, + "admin_state_up":false, + "tenant_id":"4fd44f30292945e481c7b8a0c8908869", + "pool_id":"7803631d-f181-4500-b3a2-1b68ba2a75fd", + "address":"10.0.0.5", + "status_description":null, + "id":"48a471ea-64f1-4eb6-9be7-dae6bbe40a0f" + } +} + `) + }) + + options := members.UpdateOpts{AdminStateUp: gophercloud.Disabled} + + _, err := members.Update(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", options).Extract() + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/members/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := members.Delete(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853") + 
th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go new file mode 100644 index 000000000000..e2248f81f45b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/members/urls.go @@ -0,0 +1,16 @@ +package members + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath     = "lb" + resourcePath = "members" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go new file mode 100644 index 000000000000..b5c0f29f0546 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/doc.go @@ -0,0 +1,63 @@ +/* +Package monitors provides information and interaction with the Monitors +of the Load Balancer as a Service extension for the OpenStack Networking +Service. + +Example to List Monitors + +	listOpts := monitors.ListOpts{ +		Type: "HTTP", +	} + +	allPages, err := monitors.List(networkClient, listOpts).AllPages() +	if err != nil { +		panic(err) +	} + +	allMonitors, err := monitors.ExtractMonitors(allPages) +	if err != nil { +		panic(err) +	} + +	for _, monitor := range allMonitors { +		fmt.Printf("%+v\n", monitor) +	} + +Example to Create a Monitor + +	createOpts := monitors.CreateOpts{ +		Type:          "HTTP", +		Delay:         20, +		Timeout:       20, +		MaxRetries:    5, +		URLPath:       "/check", +		ExpectedCodes: "200-299", +	} + +	monitor, err := monitors.Create(networkClient, createOpts).Extract() +	if err != nil { +		panic(err) +	} + +Example to Update a Monitor + +	monitorID := "681aed03-aadb-43ae-aead-b9016375650a" + +	updateOpts := monitors.UpdateOpts{ +		Timeout: 30, +	} + +	monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() +	if err != nil { +		panic(err) +	} + +Example to Delete a Monitor + +	monitorID := "681aed03-aadb-43ae-aead-b9016375650a" +	err := monitors.Delete(networkClient, monitorID).ExtractErr() +	if err != nil { +		panic(err) +	} +*/ +package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go new file mode 100644 index 000000000000..9ed0c769c900 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/requests.go @@ -0,0 +1,227 @@ +package monitors + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the monitor attributes you want to see returned. SortKey allows you to +// sort by a particular monitor attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. 
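+// +// For example, to list only HTTP monitors for a given tenant (an illustrative +// sketch; networkClient is assumed to be an authenticated Networking v2 +// *gophercloud.ServiceClient and tenantID an existing tenant ID): +// +//	listOpts := monitors.ListOpts{Type: "HTTP", TenantID: tenantID} +//	allPages, err := monitors.List(networkClient, listOpts).AllPages() +//	if err != nil { +//		panic(err) +//	} +//	allMonitors, err := monitors.ExtractMonitors(allPages)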
+type ListOpts struct { + ID string `q:"id"` + TenantID string `q:"tenant_id"` + Type string `q:"type"` + Delay int `q:"delay"` + Timeout int `q:"timeout"` + MaxRetries int `q:"max_retries"` + HTTPMethod string `q:"http_method"` + URLPath string `q:"url_path"` + ExpectedCodes string `q:"expected_codes"` + AdminStateUp *bool `q:"admin_state_up"` + Status string `q:"status"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// monitors. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those monitors that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return MonitorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// MonitorType is the type for all the types of LB monitors. +type MonitorType string + +// Constants that represent approved monitoring types. +const ( + TypePING MonitorType = "PING" + TypeTCP MonitorType = "TCP" + TypeHTTP MonitorType = "HTTP" + TypeHTTPS MonitorType = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLBMonitorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new health monitor. +type CreateOpts struct { + // MonitorType is the type of probe, which is PING, TCP, HTTP, or HTTPS, + // that is sent by the load balancer to verify the member state. + Type MonitorType `json:"type" required:"true"` + + // Delay is the time, in seconds, between sending probes to members. + Delay int `json:"delay" required:"true"` + + // Timeout is the maximum number of seconds for a monitor to wait for a ping + // reply before it times out. The value must be less than the delay value. + Timeout int `json:"timeout" required:"true"` + + // MaxRetries is the number of permissible ping failures before changing the + // member's status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries" required:"true"` + + // URLPath is the URI path that will be accessed if monitor type + // is HTTP or HTTPS. Required for HTTP(S) types. + URLPath string `json:"url_path,omitempty"` + + // HTTPMethod is the HTTP method used for requests by the monitor. If this + // attribute is not specified, it defaults to "GET". Required for HTTP(S) + // types. + HTTPMethod string `json:"http_method,omitempty"` + + // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor + // You can either specify a single status like "200", or a range like + // "200-202". Required for HTTP(S) types. + ExpectedCodes string `json:"expected_codes,omitempty"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` + + // AdminStateUp denotes whether the monitor is administratively up or down. + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLBMonitorCreateMap builds a request body from CreateOpts. 
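The required/optional split above is enforced by ToLBMonitorCreateMap below. As an illustrative sketch (networkClient as before; the /healthz path is only an example), a valid HTTPS monitor keeps Delay at or above Timeout and sets URLPath plus ExpectedCodes:

    createOpts := monitors.CreateOpts{
        Type:          monitors.TypeHTTPS,
        Delay:         30, // seconds between probes; must not be less than Timeout
        Timeout:       15,
        MaxRetries:    3,
        HTTPMethod:    "HEAD",
        URLPath:       "/healthz",
        ExpectedCodes: "200",
    }

    monitor, err := monitors.Create(networkClient, createOpts).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Println("created monitor", monitor.ID)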
+func (opts CreateOpts) ToLBMonitorCreateMap() (map[string]interface{}, error) { + if opts.Type == TypeHTTP || opts.Type == TypeHTTPS { + if opts.URLPath == "" { + err := gophercloud.ErrMissingInput{} + err.Argument = "monitors.CreateOpts.URLPath" + return nil, err + } + if opts.ExpectedCodes == "" { + err := gophercloud.ErrMissingInput{} + err.Argument = "monitors.CreateOpts.ExpectedCodes" + return nil, err + } + } + if opts.Delay < opts.Timeout { + err := gophercloud.ErrInvalidInput{} + err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" + err.Info = "Delay must be greater than or equal to timeout" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "health_monitor") +} + +// Create is an operation which provisions a new health monitor. There are +// different types of monitor you can provision: PING, TCP or HTTP(S). Below +// are examples of how to create each one. +// +// Here is an example config struct to use when creating a PING or TCP monitor: +// +// CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3} +// CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3} +// +// Here is an example config struct to use when creating a HTTP(S) monitor: +// +// CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3, +// HttpMethod: "HEAD", ExpectedCodes: "200"} +// +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLBMonitorCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular health monitor based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLBMonitorUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains all the values needed to update an existing monitor. +// Attributes not listed here but appear in CreateOpts are immutable and cannot +// be updated. +type UpdateOpts struct { + // Delay is the time, in seconds, between sending probes to members. + Delay int `json:"delay,omitempty"` + + // Timeout is the maximum number of seconds for a monitor to wait for a ping + // reply before it times out. The value must be less than the delay value. + Timeout int `json:"timeout,omitempty"` + + // MaxRetries is the number of permissible ping failures before changing the + // member's status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries,omitempty"` + + // URLPath is the URI path that will be accessed if monitor type + // is HTTP or HTTPS. + URLPath string `json:"url_path,omitempty"` + + // HTTPMethod is the HTTP method used for requests by the monitor. If this + // attribute is not specified, it defaults to "GET". + HTTPMethod string `json:"http_method,omitempty"` + + // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor + // You can either specify a single status like "200", or a range like + // "200-202". + ExpectedCodes string `json:"expected_codes,omitempty"` + + // AdminStateUp denotes whether the monitor is administratively up or down. + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLBMonitorUpdateMap builds a request body from UpdateOpts. 
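For example (monitorID standing in for an existing monitor's UUID), an update that keeps Delay at or above Timeout, as the builder below requires, might read:

    updateOpts := monitors.UpdateOpts{
        Delay:      60,
        Timeout:    30,
        MaxRetries: 5,
    }

    monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Println("monitor now waits", monitor.Delay, "seconds between probes")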
+func (opts UpdateOpts) ToLBMonitorUpdateMap() (map[string]interface{}, error) { + if opts.Delay > 0 && opts.Timeout > 0 && opts.Delay < opts.Timeout { + err := gophercloud.ErrInvalidInput{} + err.Argument = "monitors.CreateOpts.Delay/monitors.CreateOpts.Timeout" + err.Value = fmt.Sprintf("%d/%d", opts.Delay, opts.Timeout) + err.Info = "Delay must be greater than or equal to timeout" + return nil, err + } + return gophercloud.BuildRequestBody(opts, "health_monitor") +} + +// Update is an operation which modifies the attributes of the specified +// monitor. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBMonitorUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular monitor based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go new file mode 100644 index 000000000000..cc99f7cced70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/results.go @@ -0,0 +1,141 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // ID is the unique ID for the Monitor. + ID string + + // Name is the monitor name. Does not have to be unique. + Name string + + // TenantID is the owner of the Monitor. + TenantID string `json:"tenant_id"` + + // Type is the type of probe sent by the load balancer to verify the member + // state, which is PING, TCP, HTTP, or HTTPS. + Type string + + // Delay is the time, in seconds, between sending probes to members. + Delay int + + // Timeout is the maximum number of seconds for a monitor to wait for a + // connection to be established before it times out. This value must be less + // than the delay value. + Timeout int + + // MaxRetries is the number of allowed connection failures before changing the + // status of the member to INACTIVE. A valid value is from 1 to 10. + MaxRetries int `json:"max_retries"` + + // HTTPMethod is the HTTP method that the monitor uses for requests. 
+ HTTPMethod string `json:"http_method"` + + // URLPath is the HTTP path of the request sent by the monitor to test the + // health of a member. Must be a string beginning with a forward slash (/). + URLPath string `json:"url_path"` + + // ExpectedCodes is the expected HTTP codes for a passing HTTP(S) monitor. + ExpectedCodes string `json:"expected_codes"` + + // AdminStateUp is the administrative state of the health monitor, which is up + // (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Status is the status of the health monitor. Indicates whether the health + // monitor is operational. + Status string +} + +// MonitorPage is the page returned by a pager when traversing over a +// collection of health monitors. +type MonitorPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of monitors has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MonitorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"health_monitors_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PoolPage struct is empty. +func (r MonitorPage) IsEmpty() (bool, error) { + is, err := ExtractMonitors(r) + return len(is) == 0, err +} + +// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, +// and extracts the elements into a slice of Monitor structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMonitors(r pagination.Page) ([]Monitor, error) { + var s struct { + Monitors []Monitor `json:"health_monitors"` + } + err := (r.(MonitorPage)).ExtractInto(&s) + return s.Monitors, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a monitor. +func (r commonResult) Extract() (*Monitor, error) { + var s struct { + Monitor *Monitor `json:"health_monitor"` + } + err := r.ExtractInto(&s) + return s.Monitor, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Monitor. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Monitor. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Monitor. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its Extract +// method to determine if the request succeeded or failed. 
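Each of the result types above shares commonResult.Extract, so reading a monitor back is a one-liner plus an error check. A minimal sketch, with networkClient and monitorID as assumed earlier:

    monitor, err := monitors.Get(networkClient, monitorID).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Printf("monitor %s: type=%s delay=%ds timeout=%ds status=%s\n",
        monitor.ID, monitor.Type, monitor.Delay, monitor.Timeout, monitor.Status)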
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/testing/doc.go new file mode 100644 index 000000000000..e2b6f12a9285 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/testing/doc.go @@ -0,0 +1,2 @@ +// monitors unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/testing/requests_test.go new file mode 100644 index 000000000000..f7360743b035 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/testing/requests_test.go @@ -0,0 +1,310 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "health_monitors":[ + { + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":10, + "max_retries":1, + "timeout":1, + "type":"PING", + "id":"466c8345-28d8-4f84-a246-e04380b0461d" + }, + { + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":5, + "expected_codes":"200", + "max_retries":2, + "http_method":"GET", + "timeout":2, + "url_path":"/", + "type":"HTTP", + "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7" + } + ] +} + `) + }) + + count := 0 + + monitors.List(fake.ServiceClient(), monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := monitors.ExtractMonitors(page) + if err != nil { + t.Errorf("Failed to extract monitors: %v", err) + return false, err + } + + expected := []monitors.Monitor{ + { + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 10, + MaxRetries: 1, + Timeout: 1, + Type: "PING", + ID: "466c8345-28d8-4f84-a246-e04380b0461d", + }, + { + AdminStateUp: true, + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 5, + ExpectedCodes: "200", + MaxRetries: 2, + Timeout: 2, + URLPath: "/", + Type: "HTTP", + HTTPMethod: "GET", + ID: "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestDelayMustBeGreaterOrEqualThanTimeout(t *testing.T) { + _, err := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{ + Type: "HTTP", + Delay: 1, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } + + _, err = monitors.Update(fake.ServiceClient(), "453105b9-1754-413f-aab1-55f1af620750", monitors.UpdateOpts{ + Delay: 1, + 
Timeout: 10, + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "health_monitor":{ + "type":"HTTP", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "delay":20, + "timeout":10, + "max_retries":5, + "url_path":"/check", + "expected_codes":"200-299" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "health_monitor":{ + "id":"f3eeab00-8367-4524-b662-55e64d4cacb5", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "type":"HTTP", + "delay":20, + "timeout":10, + "max_retries":5, + "http_method":"GET", + "url_path":"/check", + "expected_codes":"200-299", + "admin_state_up":true, + "status":"ACTIVE" + } +} + `) + }) + + _, err := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{ + Type: "HTTP", + TenantID: "453105b9-1754-413f-aab1-55f1af620750", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + + th.AssertNoErr(t, err) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = monitors.Create(fake.ServiceClient(), monitors.CreateOpts{Type: monitors.TypeHTTP}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors/f3eeab00-8367-4524-b662-55e64d4cacb5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "health_monitor":{ + "id":"f3eeab00-8367-4524-b662-55e64d4cacb5", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "type":"HTTP", + "delay":20, + "timeout":10, + "max_retries":5, + "http_method":"GET", + "url_path":"/check", + "expected_codes":"200-299", + "admin_state_up":true, + "status":"ACTIVE" + } +} + `) + }) + + hm, err := monitors.Get(fake.ServiceClient(), "f3eeab00-8367-4524-b662-55e64d4cacb5").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "f3eeab00-8367-4524-b662-55e64d4cacb5", hm.ID) + th.AssertEquals(t, "453105b9-1754-413f-aab1-55f1af620750", hm.TenantID) + th.AssertEquals(t, "HTTP", hm.Type) + th.AssertEquals(t, 20, hm.Delay) + th.AssertEquals(t, 10, hm.Timeout) + th.AssertEquals(t, 5, hm.MaxRetries) + th.AssertEquals(t, "GET", hm.HTTPMethod) + th.AssertEquals(t, "/check", hm.URLPath) + th.AssertEquals(t, "200-299", hm.ExpectedCodes) + th.AssertEquals(t, true, hm.AdminStateUp) + th.AssertEquals(t, "ACTIVE", hm.Status) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + 
th.TestJSONRequest(t, r, ` +{ + "health_monitor":{ + "delay": 30, + "timeout": 20, + "max_retries": 10, + "url_path": "/another_check", + "expected_codes": "301", + "admin_state_up": true + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "health_monitor": { + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "delay": 30, + "max_retries": 10, + "http_method": "GET", + "timeout": 20, + "pools": [ + { + "status": "PENDING_CREATE", + "status_description": null, + "pool_id": "6e55751f-6ad4-4e53-b8d4-02e442cd21df" + } + ], + "type": "PING", + "id": "b05e44b5-81f9-4551-b474-711a722698f7" + } +} + `) + }) + + _, err := monitors.Update(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7", monitors.UpdateOpts{ + Delay: 30, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + AdminStateUp: gophercloud.Enabled, + }).Extract() + + th.AssertNoErr(t, err) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/health_monitors/b05e44b5-81f9-4551-b474-711a722698f7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := monitors.Delete(fake.ServiceClient(), "b05e44b5-81f9-4551-b474-711a722698f7") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go new file mode 100644 index 000000000000..e9d90fcc5692 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/monitors/urls.go @@ -0,0 +1,16 @@ +package monitors + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "health_monitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go new file mode 100644 index 000000000000..25c4204dc930 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/doc.go @@ -0,0 +1,81 @@ +/* +Package pools provides information and interaction with the Pools of the +Load Balancing as a Service extension for the OpenStack Networking service. 
+ +Example to List Pools + + listOpts := pools.ListOpts{ + SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", + } + + allPages, err := pools.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPools, err := pools.ExtractPools(allPages) + if err != nil { + panic(err) + } + + for _, pool := range allPools { + fmt.Printf("%+v\n", pool) + } + +Example to Create a Pool + + createOpts := pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + Provider: "haproxy", + } + + pool, err := pools.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + + updateOpts := pools.UpdateOpts{ + LBMethod: pools.LBMethodLeastConnections, + } + + pool, err := pools.Update(networkClient, poolID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + err := pools.Delete(networkClient, poolID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Associate a Monitor to a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" + + pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract() + if err != nil { + panic(err) + } + +Example to Disassociate a Monitor from a Pool + + poolID := "166db5e6-c72a-4d77-8776-3573e27ae271" + monitorID := "8bbfbe1c-6faa-4d97-abdb-0df6c90df70b" + + pool, err := pools.DisassociateMonitor(networkClient, poolID, monitorID).Extract() + if err != nil { + panic(err) + } +*/ +package pools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go new file mode 100644 index 000000000000..b3593548d371 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/requests.go @@ -0,0 +1,175 @@ +package pools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + LBMethod string `q:"lb_method"` + Protocol string `q:"protocol"` + SubnetID string `q:"subnet_id"` + TenantID string `q:"tenant_id"` + AdminStateUp *bool `q:"admin_state_up"` + Name string `q:"name"` + ID string `q:"id"` + VIPID string `q:"vip_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// pools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those pools that are owned by the +// tenant who submits the request, unless an admin user submits the request. 
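For instance (same networkClient assumption as the examples above), iterating the pager one page at a time instead of collecting everything with AllPages might look like:

    listOpts := pools.ListOpts{
        Protocol: "HTTP",
        SubnetID: "8032909d-47a1-4715-90af-5153ffe39861",
    }

    err := pools.List(networkClient, listOpts).EachPage(func(page pagination.Page) (bool, error) {
        poolList, err := pools.ExtractPools(page)
        if err != nil {
            return false, err
        }
        for _, p := range poolList {
            fmt.Printf("%s: %s (%s)\n", p.ID, p.Name, p.Status)
        }
        return true, nil
    })
    if err != nil {
        panic(err)
    }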
+func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// LBMethod is a type used for possible load balancing methods. +type LBMethod string + +// LBProtocol is a type used for possible load balancing protocols. +type LBProtocol string + +// Supported attributes for create/update operations. +const ( + LBMethodRoundRobin LBMethod = "ROUND_ROBIN" + LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" + + ProtocolTCP LBProtocol = "TCP" + ProtocolHTTP LBProtocol = "HTTP" + ProtocolHTTPS LBProtocol = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLBPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new pool. +type CreateOpts struct { + // Name of the pool. + Name string `json:"name" required:"true"` + + // Protocol used by the pool members, you can use either + // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. + Protocol LBProtocol `json:"protocol" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` + + // SubnetID is the network on which the members of the pool will be located. + // Only members that are on this network can be added to the pool. + SubnetID string `json:"subnet_id,omitempty"` + + // LBMethod is the algorithm used to distribute load between the members of + // the pool. The current specification supports LBMethodRoundRobin and + // LBMethodLeastConnections as valid values for this attribute. + LBMethod LBMethod `json:"lb_method" required:"true"` + + // Provider of the pool. + Provider string `json:"provider,omitempty"` +} + +// ToLBPoolCreateMap builds a request body based on CreateOpts. +func (opts CreateOpts) ToLBPoolCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Create accepts a CreateOptsBuilder and uses the values to create a new +// load balancer pool. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLBPoolCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters ot the +// Update request. +type UpdateOptsBuilder interface { + ToLBPoolUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the values used when updating a pool. +type UpdateOpts struct { + // Name of the pool. + Name string `json:"name,omitempty"` + + // LBMethod is the algorithm used to distribute load between the members of + // the pool. The current specification supports LBMethodRoundRobin and + // LBMethodLeastConnections as valid values for this attribute. + LBMethod LBMethod `json:"lb_method,omitempty"` +} + +// ToLBPoolUpdateMap builds a request body based on UpdateOpts. 
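Tying the two option structs together, a hypothetical create-then-rename sequence could read as follows (the subnet ID is illustrative, borrowed from the package examples; networkClient as before):

    createOpts := pools.CreateOpts{
        Name:     "web_pool",
        Protocol: pools.ProtocolHTTP,
        LBMethod: pools.LBMethodRoundRobin,
        SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9",
    }

    pool, err := pools.Create(networkClient, createOpts).Extract()
    if err != nil {
        panic(err)
    }

    updateOpts := pools.UpdateOpts{
        Name:     "web_pool_v2",
        LBMethod: pools.LBMethodLeastConnections,
    }

    pool, err = pools.Update(networkClient, pool.ID, updateOpts).Extract()
    if err != nil {
        panic(err)
    }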
+func (opts UpdateOpts) ToLBPoolUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Update allows pools to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToLBPoolUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular pool based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// AssociateMonitor will associate a health monitor with a particular pool. +// Once associated, the health monitor will start monitoring the members of the +// pool and will deactivate these members if they are deemed unhealthy. A +// member can be deactivated (status set to INACTIVE) if any of health monitors +// finds it unhealthy. +func AssociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { + b := map[string]interface{}{"health_monitor": map[string]string{"id": monitorID}} + _, r.Err = c.Post(associateURL(c, poolID), b, &r.Body, nil) + return +} + +// DisassociateMonitor will disassociate a health monitor with a particular +// pool. When dissociation is successful, the health monitor will no longer +// check for the health of the members of the pool. +func DisassociateMonitor(c *gophercloud.ServiceClient, poolID, monitorID string) (r AssociateResult) { + _, r.Err = c.Delete(disassociateURL(c, poolID, monitorID), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go new file mode 100644 index 000000000000..c2bae82d56ea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/results.go @@ -0,0 +1,137 @@ +package pools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Pool represents a logical set of devices, such as web servers, that you +// group together to receive and process traffic. The load balancing function +// chooses a member of the pool according to the configured load balancing +// method to handle the new requests or connections received on the VIP address. +// There is only one pool per virtual IP. +type Pool struct { + // Status of the pool. Indicates whether the pool is operational. + Status string + + // LBMethod is the load-balancer algorithm, which is round-robin, + // least-connections, and so on. This value, which must be supported, is + // dependent on the provider. + LBMethod string `json:"lb_method"` + + // Protocol of the pool, which is TCP, HTTP, or HTTPS. + Protocol string + + // Description for the pool. + Description string + + // MonitorIDs are the IDs of associated monitors which check the health of + // the pool members. + MonitorIDs []string `json:"health_monitors"` + + // SubnetID is the network on which the members of the pool will be located. + // Only members that are on this network can be added to the pool. + SubnetID string `json:"subnet_id"` + + // TenantID is the owner of the pool. + TenantID string `json:"tenant_id"` + + // AdminStateUp is the administrative state of the pool, which is up + // (true) or down (false). 
+ AdminStateUp bool `json:"admin_state_up"` + + // Name of the pool. + Name string + + // MemberIDs is the list of member IDs that belong to the pool. + MemberIDs []string `json:"members"` + + // ID is the unique ID for the pool. + ID string + + // VIPID is the ID of the virtual IP associated with this pool. + VIPID string `json:"vip_id"` + + // The provider. + Provider string +} + +// PoolPage is the page returned by a pager when traversing over a +// collection of pools. +type PoolPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of pools has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r PoolPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"pools_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PoolPage struct is empty. +func (r PoolPage) IsEmpty() (bool, error) { + is, err := ExtractPools(r) + return len(is) == 0, err +} + +// ExtractPools accepts a Page struct, specifically a PoolPage struct, +// and extracts the elements into a slice of Router structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPools(r pagination.Page) ([]Pool, error) { + var s struct { + Pools []Pool `json:"pools"` + } + err := (r.(PoolPage)).ExtractInto(&s) + return s.Pools, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a router. +func (r commonResult) Extract() (*Pool, error) { + var s struct { + Pool *Pool `json:"pool"` + } + err := r.ExtractInto(&s) + return s.Pool, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Pool. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Pool. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Pool. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to interpret it as a Pool. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AssociateResult represents the result of an association operation. Call its Extract +// method to interpret it as a Pool. 
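Because an association request also returns the pool body, Extract on an AssociateResult yields the updated Pool. A sketch of wiring a monitor to a pool (poolID and monitorID standing in for existing UUIDs, networkClient as above):

    pool, err := pools.AssociateMonitor(networkClient, poolID, monitorID).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Printf("pool %s now has monitors %v\n", pool.ID, pool.MonitorIDs)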
+type AssociateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/testing/doc.go new file mode 100644 index 000000000000..46e335f3f2d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/testing/doc.go @@ -0,0 +1,2 @@ +// pools unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/testing/requests_test.go new file mode 100644 index 000000000000..de038cb8cdb6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/testing/requests_test.go @@ -0,0 +1,316 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "pools":[ + { + "status":"ACTIVE", + "lb_method":"ROUND_ROBIN", + "protocol":"HTTP", + "description":"", + "health_monitors":[ + "466c8345-28d8-4f84-a246-e04380b0461d", + "5d4b5228-33b0-4e60-b225-9b727c1a20e7" + ], + "members":[ + "701b531b-111a-4f21-ad85-4795b7b12af6", + "beb53b4d-230b-4abd-8118-575b8fa006ef" + ], + "status_description": null, + "id":"72741b06-df4d-4715-b142-276b6bce75ab", + "vip_id":"4ec89087-d057-4e2c-911f-60a3b47ee304", + "name":"app_pool", + "admin_state_up":true, + "subnet_id":"8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "health_monitors_status": [], + "provider": "haproxy" + } + ] +} + `) + }) + + count := 0 + + pools.List(fake.ServiceClient(), pools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := pools.ExtractPools(page) + if err != nil { + t.Errorf("Failed to extract pools: %v", err) + return false, err + } + + expected := []pools.Pool{ + { + Status: "ACTIVE", + LBMethod: "ROUND_ROBIN", + Protocol: "HTTP", + Description: "", + MonitorIDs: []string{ + "466c8345-28d8-4f84-a246-e04380b0461d", + "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + }, + SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "app_pool", + MemberIDs: []string{ + "701b531b-111a-4f21-ad85-4795b7b12af6", + "beb53b4d-230b-4abd-8118-575b8fa006ef", + }, + ID: "72741b06-df4d-4715-b142-276b6bce75ab", + VIPID: "4ec89087-d057-4e2c-911f-60a3b47ee304", + Provider: "haproxy", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, 
"X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "pool": { + "lb_method": "ROUND_ROBIN", + "protocol": "HTTP", + "name": "Example pool", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "provider": "haproxy" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "pool": { + "status": "PENDING_CREATE", + "lb_method": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "", + "health_monitors": [], + "members": [], + "status_description": null, + "id": "69055154-f603-4a28-8951-7cc2d9e54a9a", + "vip_id": null, + "name": "Example pool", + "admin_state_up": true, + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "health_monitors_status": [], + "provider": "haproxy" + } +} + `) + }) + + options := pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + Provider: "haproxy", + } + p, err := pools.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "PENDING_CREATE", p.Status) + th.AssertEquals(t, "ROUND_ROBIN", p.LBMethod) + th.AssertEquals(t, "HTTP", p.Protocol) + th.AssertEquals(t, "", p.Description) + th.AssertDeepEquals(t, []string{}, p.MonitorIDs) + th.AssertDeepEquals(t, []string{}, p.MemberIDs) + th.AssertEquals(t, "69055154-f603-4a28-8951-7cc2d9e54a9a", p.ID) + th.AssertEquals(t, "Example pool", p.Name) + th.AssertEquals(t, "1981f108-3c48-48d2-b908-30f7d28532c9", p.SubnetID) + th.AssertEquals(t, "2ffc6e22aae24e4795f87155d24c896f", p.TenantID) + th.AssertEquals(t, "haproxy", p.Provider) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "pool":{ + "id":"332abe93-f488-41ba-870b-2ac66be7f853", + "tenant_id":"19eaa775-cf5d-49bc-902e-2f85f668d995", + "name":"Example pool", + "description":"", + "protocol":"tcp", + "lb_algorithm":"ROUND_ROBIN", + "session_persistence":{ + }, + "healthmonitor_id":null, + "members":[ + ], + "admin_state_up":true, + "status":"ACTIVE" + } +} + `) + }) + + n, err := pools.Get(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.ID, "332abe93-f488-41ba-870b-2ac66be7f853") +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "pool":{ + "name":"SuperPool", + "lb_method": "LEAST_CONNECTIONS" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "pool":{ + "status":"PENDING_UPDATE", + "lb_method":"LEAST_CONNECTIONS", + 
"protocol":"TCP", + "description":"", + "health_monitors":[ + + ], + "subnet_id":"8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "admin_state_up":true, + "name":"SuperPool", + "members":[ + + ], + "id":"61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "vip_id":null + } +} + `) + }) + + options := pools.UpdateOpts{Name: "SuperPool", LBMethod: pools.LBMethodLeastConnections} + + n, err := pools.Update(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "SuperPool", n.Name) + th.AssertDeepEquals(t, "LEAST_CONNECTIONS", n.LBMethod) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := pools.Delete(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853") + th.AssertNoErr(t, res.Err) +} + +func TestAssociateHealthMonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853/health_monitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "health_monitor":{ + "id":"b624decf-d5d3-4c66-9a3d-f047e7786181" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{}`) + }) + + _, err := pools.AssociateMonitor(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "b624decf-d5d3-4c66-9a3d-f047e7786181").Extract() + th.AssertNoErr(t, err) +} + +func TestDisassociateHealthMonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/pools/332abe93-f488-41ba-870b-2ac66be7f853/health_monitors/b624decf-d5d3-4c66-9a3d-f047e7786181", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := pools.DisassociateMonitor(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "b624decf-d5d3-4c66-9a3d-f047e7786181") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go new file mode 100644 index 000000000000..fe3601bbec95 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/pools/urls.go @@ -0,0 +1,25 @@ +package pools + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "pools" + monitorPath = "health_monitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func associateURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, monitorPath) +} + +func disassociateURL(c *gophercloud.ServiceClient, poolID, monitorID string) string { + return c.ServiceURL(rootPath, resourcePath, 
poolID, monitorPath, monitorID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go new file mode 100644 index 000000000000..7fd861044b67 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/doc.go @@ -0,0 +1,65 @@ +/* +Package vips provides information and interaction with the Virtual IPs of the +Load Balancing as a Service extension for the OpenStack Networking service. + +Example to List Virtual IPs + + listOpts := vips.ListOpts{ + SubnetID: "d9bd223b-f1a9-4f98-953b-df977b0f902d", + } + + allPages, err := vips.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allVIPs, err := vips.ExtractVIPs(allPages) + if err != nil { + panic(err) + } + + for _, vip := range allVIPs { + fmt.Printf("%+v\n", vip) + } + +Example to Create a Virtual IP + + createOpts := vips.CreateOpts{ + Protocol: "HTTP", + Name: "NewVip", + AdminStateUp: gophercloud.Enabled, + SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", + PoolID: "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + ProtocolPort: 80, + Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, + } + + vip, err := vips.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Virtual IP + + vipID := "93f1bad4-0423-40a8-afac-3fc541839912" + + i1000 := 1000 + updateOpts := vips.UpdateOpts{ + ConnLimit: &i1000, + Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, + } + + vip, err := vips.Update(networkClient, vipID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Virtual IP + + vipID := "93f1bad4-0423-40a8-afac-3fc541839912" + err := vips.Delete(networkClient, vipID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package vips diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go new file mode 100644 index 000000000000..53b81bfdb927 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/requests.go @@ -0,0 +1,180 @@ +package vips + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + Status string `q:"status"` + TenantID string `q:"tenant_id"` + SubnetID string `q:"subnet_id"` + Address string `q:"address"` + PortID string `q:"port_id"` + Protocol string `q:"protocol"` + ProtocolPort int `q:"protocol_port"` + ConnectionLimit int `q:"connection_limit"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// Virtual IPs. It accepts a ListOpts struct, which allows you to filter and +// sort the returned collection for greater efficiency. 
+// +// Default policy settings return only those virtual IPs that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return VIPPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create Request. +type CreateOptsBuilder interface { + ToVIPCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new virtual IP. +type CreateOpts struct { + // Name is the human-readable name for the VIP. Does not have to be unique. + Name string `json:"name" required:"true"` + + // SubnetID is the network on which to allocate the VIP's address. A tenant + // can only create VIPs on networks authorized by policy (e.g. networks that + // belong to them or networks that are shared). + SubnetID string `json:"subnet_id" required:"true"` + + // Protocol - can either be TCP, HTTP or HTTPS. + Protocol string `json:"protocol" required:"true"` + + // ProtocolPort is the port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // PoolID is the ID of the pool with which the VIP is associated. + PoolID string `json:"pool_id" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another tenant. + TenantID string `json:"tenant_id,omitempty"` + + // Address is the IP address of the VIP. + Address string `json:"address,omitempty"` + + // Description is the human-readable description for the VIP. + Description string `json:"description,omitempty"` + + // Persistence is the the of session persistence to use. + // Omit this field to prevent session persistence. + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + + // ConnLimit is the maximum number of connections allowed for the VIP. + ConnLimit *int `json:"connection_limit,omitempty"` + + // AdminStateUp is the administrative state of the VIP. A valid value is + // true (UP) or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToVIPCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToVIPCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "vip") +} + +// Create is an operation which provisions a new virtual IP based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. +// +// Please note that the PoolID should refer to a pool that is not already +// associated with another vip. If the pool is already used by another vip, +// then the operation will fail with a 409 Conflict error will be returned. +// +// Users with an admin role can create VIPs on behalf of other tenants by +// specifying a TenantID attribute different than their own. +func Create(c *gophercloud.ServiceClient, opts CreateOpts) (r CreateResult) { + b, err := opts.ToVIPCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular virtual IP based on its unique ID. 
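As a sketch of the CreateOpts above with session persistence and a connection limit (networkClient as before; the pool and subnet IDs are illustrative values taken from the test fixtures in this package):

    connLimit := 500
    createOpts := vips.CreateOpts{
        Name:         "web_vip",
        SubnetID:     "8032909d-47a1-4715-90af-5153ffe39861",
        Protocol:     "HTTP",
        ProtocolPort: 80,
        PoolID:       "61b1f87a-7a21-4ad3-9dda-7f81d249944f",
        Persistence:  &vips.SessionPersistence{Type: "HTTP_COOKIE"},
        ConnLimit:    &connLimit,
        AdminStateUp: gophercloud.Enabled,
    }

    vip, err := vips.Create(networkClient, createOpts).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Printf("VIP %s listening on %s:%d\n", vip.ID, vip.Address, vip.ProtocolPort)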
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToVIPUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains all the values needed to update an existing virtual IP.
+// Attributes not listed here but appear in CreateOpts are immutable and cannot
+// be updated.
+type UpdateOpts struct {
+	// Name is the human-readable name for the VIP. Does not have to be unique.
+	Name *string `json:"name,omitempty"`
+
+	// PoolID is the ID of the pool with which the VIP is associated.
+	PoolID *string `json:"pool_id,omitempty"`
+
+	// Description is the human-readable description for the VIP.
+	Description *string `json:"description,omitempty"`
+
+	// Persistence is the type of session persistence to use.
+	// Omit this field to prevent session persistence.
+	Persistence *SessionPersistence `json:"session_persistence,omitempty"`
+
+	// ConnLimit is the maximum number of connections allowed for the VIP.
+	ConnLimit *int `json:"connection_limit,omitempty"`
+
+	// AdminStateUp is the administrative state of the VIP. A valid value is
+	// true (UP) or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToVIPUpdateMap builds a request body based on UpdateOpts.
+func (opts UpdateOpts) ToVIPUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vip")
+}
+
+// Update is an operation which modifies the attributes of the specified VIP.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToVIPUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 202},
+	})
+	return
+}
+
+// Delete will permanently delete a particular virtual IP based on its unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
new file mode 100644
index 000000000000..cb0994a7b250
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/results.go
@@ -0,0 +1,156 @@
+package vips
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SessionPersistence represents the session persistence feature of the load
+// balancing service. It attempts to force connections or requests in the same
+// session to be processed by the same member as long as it is active. Three
+// types of persistence are supported:
+//
+// SOURCE_IP: With this mode, all connections originating from the same source
+// IP address will be handled by the same member of the pool.
+// HTTP_COOKIE: With this persistence mode, the load balancing function will
+// create a cookie on the first request from a client. Subsequent
+// requests containing the same cookie value will be handled by
+// the same member of the pool.
+// APP_COOKIE: With this persistence mode, the load balancing function will
+// rely on a cookie established by the backend application. All
+// requests carrying the same cookie value will be handled by the
+// same member of the pool.
+type SessionPersistence struct {
+	// Type is the type of persistence mode.
+	Type string `json:"type"`
+
+	// CookieName is the name of cookie if persistence mode is set appropriately.
+	CookieName string `json:"cookie_name,omitempty"`
+}
+
+// VirtualIP is the primary load balancing configuration object that specifies
+// the virtual IP address and port on which client traffic is received, as well
+// as other details such as the load balancing method to be used, protocol, etc.
+// This entity is sometimes known in LB products under the name of a "virtual
+// server", a "vserver" or a "listener".
+type VirtualIP struct {
+	// ID is the unique ID for the VIP.
+	ID string `json:"id"`
+
+	// TenantID is the owner of the VIP.
+	TenantID string `json:"tenant_id"`
+
+	// Name is the human-readable name for the VIP. Does not have to be unique.
+	Name string `json:"name"`
+
+	// Description is the human-readable description for the VIP.
+	Description string `json:"description"`
+
+	// SubnetID is the ID of the subnet on which to allocate the VIP address.
+	SubnetID string `json:"subnet_id"`
+
+	// Address is the IP address of the VIP.
+	Address string `json:"address"`
+
+	// Protocol of the VIP address. A valid value is TCP, HTTP, or HTTPS.
+	Protocol string `json:"protocol"`
+
+	// ProtocolPort is the port on which to listen to client traffic that is
+	// associated with the VIP address. A valid value is from 0 to 65535.
+	ProtocolPort int `json:"protocol_port"`
+
+	// PoolID is the ID of the pool with which the VIP is associated.
+	PoolID string `json:"pool_id"`
+
+	// PortID is the ID of the port which belongs to the load balancer.
+	PortID string `json:"port_id"`
+
+	// Persistence indicates whether connections in the same session will be
+	// processed by the same pool member or not.
+	Persistence SessionPersistence `json:"session_persistence"`
+
+	// ConnLimit is the maximum number of connections allowed for the VIP.
+	// Default is -1, meaning no limit.
+	ConnLimit int `json:"connection_limit"`
+
+	// AdminStateUp is the administrative state of the VIP. A valid value is
+	// true (UP) or false (DOWN).
+	AdminStateUp bool `json:"admin_state_up"`
+
+	// Status is the status of the VIP. Indicates whether the VIP is operational.
+	Status string `json:"status"`
+}
+
+// VIPPage is the page returned by a pager when traversing over a
+// collection of virtual IPs.
+type VIPPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of virtual IPs has reached
+// the end of a page and the pager seeks to traverse over a new one. In order
+// to do this, it needs to construct the next page's URL.
+func (r VIPPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"vips_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether a VIPPage struct is empty.
+func (r VIPPage) IsEmpty() (bool, error) {
+	is, err := ExtractVIPs(r)
+	return len(is) == 0, err
+}
+
+// ExtractVIPs accepts a Page struct, specifically a VIPPage struct,
+// and extracts the elements into a slice of VirtualIP structs. In other words,
+// a generic collection is mapped into a relevant slice.
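+//
+// A paging sketch (assuming networkClient is an authenticated
+// *gophercloud.ServiceClient; the outer error should be checked as well):
+//
+//	err := vips.List(networkClient, vips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
+//		vipList, err := vips.ExtractVIPs(page)
+//		if err != nil {
+//			return false, err
+//		}
+//		for _, vip := range vipList {
+//			fmt.Printf("%+v\n", vip)
+//		}
+//		return true, nil
+//	})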
+func ExtractVIPs(r pagination.Page) ([]VirtualIP, error) { + var s struct { + VIPs []VirtualIP `json:"vips"` + } + err := (r.(VIPPage)).ExtractInto(&s) + return s.VIPs, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a VirtualIP. +func (r commonResult) Extract() (*VirtualIP, error) { + var s struct { + VirtualIP *VirtualIP `json:"vip" json:"vip"` + } + err := r.ExtractInto(&s) + return s.VirtualIP, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a VirtualIP +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a VirtualIP +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a VirtualIP +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/testing/doc.go new file mode 100644 index 000000000000..e04046fbefaf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/testing/doc.go @@ -0,0 +1,2 @@ +// vips unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/testing/requests_test.go new file mode 100644 index 000000000000..7f9b6ddb7824 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/testing/requests_test.go @@ -0,0 +1,330 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vips":[ + { + "id": "db902c0c-d5ff-4753-b465-668ad9656918", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "web_vip", + "description": "lb config for the web tier", + "subnet_id": "96a4386a-f8c3-42ed-afce-d7954eee77b3", + "address" : "10.30.176.47", + "port_id" : "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + "protocol": "HTTP", + "protocol_port": 80, + "pool_id" : "cfc6589d-f949-4c66-99d2-c2da56ef3764", + "admin_state_up": true, + "status": "ACTIVE" + }, + { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "db_vip", + "description": "lb config for the db tier", + "subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "address" : "10.30.176.48", + "port_id" : 
"cd1f7a47-4fa6-449c-9ee7-632838aedfea", + "protocol": "TCP", + "protocol_port": 3306, + "pool_id" : "41efe233-7591-43c5-9cf7-923964759f9e", + "session_persistence" : {"type" : "SOURCE_IP"}, + "connection_limit" : 2000, + "admin_state_up": true, + "status": "INACTIVE" + } + ] +} + `) + }) + + count := 0 + + vips.List(fake.ServiceClient(), vips.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := vips.ExtractVIPs(page) + if err != nil { + t.Errorf("Failed to extract LBs: %v", err) + return false, err + } + + expected := []vips.VirtualIP{ + { + ID: "db902c0c-d5ff-4753-b465-668ad9656918", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "web_vip", + Description: "lb config for the web tier", + SubnetID: "96a4386a-f8c3-42ed-afce-d7954eee77b3", + Address: "10.30.176.47", + PortID: "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + Protocol: "HTTP", + ProtocolPort: 80, + PoolID: "cfc6589d-f949-4c66-99d2-c2da56ef3764", + Persistence: vips.SessionPersistence{}, + ConnLimit: 0, + AdminStateUp: true, + Status: "ACTIVE", + }, + { + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "db_vip", + Description: "lb config for the db tier", + SubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + Address: "10.30.176.48", + PortID: "cd1f7a47-4fa6-449c-9ee7-632838aedfea", + Protocol: "TCP", + ProtocolPort: 3306, + PoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + Persistence: vips.SessionPersistence{Type: "SOURCE_IP"}, + ConnLimit: 2000, + AdminStateUp: true, + Status: "INACTIVE", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "vip": { + "protocol": "HTTP", + "name": "NewVip", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "protocol_port": 80, + "session_persistence": {"type": "SOURCE_IP"} + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "vip": { + "status": "PENDING_CREATE", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea", + "connection_limit": -1, + "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "address": "10.0.0.11", + "protocol_port": 80, + "port_id": "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", + "id": "c987d2be-9a3c-4ac9-a046-e8716b1350e2", + "name": "NewVip" + } +} + `) + }) + + opts := vips.CreateOpts{ + Protocol: "HTTP", + Name: "NewVip", + AdminStateUp: gophercloud.Enabled, + SubnetID: "8032909d-47a1-4715-90af-5153ffe39861", + PoolID: "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + ProtocolPort: 80, + Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, + } + + r, err := vips.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "PENDING_CREATE", r.Status) + th.AssertEquals(t, "HTTP", r.Protocol) + th.AssertEquals(t, "", r.Description) + th.AssertEquals(t, true, r.AdminStateUp) + th.AssertEquals(t, 
"8032909d-47a1-4715-90af-5153ffe39861", r.SubnetID) + th.AssertEquals(t, "83657cfcdfe44cd5920adaf26c48ceea", r.TenantID) + th.AssertEquals(t, -1, r.ConnLimit) + th.AssertEquals(t, "61b1f87a-7a21-4ad3-9dda-7f81d249944f", r.PoolID) + th.AssertEquals(t, "10.0.0.11", r.Address) + th.AssertEquals(t, 80, r.ProtocolPort) + th.AssertEquals(t, "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", r.PortID) + th.AssertEquals(t, "c987d2be-9a3c-4ac9-a046-e8716b1350e2", r.ID) + th.AssertEquals(t, "NewVip", r.Name) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := vips.Create(fake.ServiceClient(), vips.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = vips.Create(fake.ServiceClient(), vips.CreateOpts{Name: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = vips.Create(fake.ServiceClient(), vips.CreateOpts{Name: "foo", SubnetID: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = vips.Create(fake.ServiceClient(), vips.CreateOpts{Name: "foo", SubnetID: "bar", Protocol: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = vips.Create(fake.ServiceClient(), vips.CreateOpts{Name: "foo", SubnetID: "bar", Protocol: "bar", ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vip": { + "status": "ACTIVE", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id": "83657cfcdfe44cd5920adaf26c48ceea", + "connection_limit": 1000, + "pool_id": "72741b06-df4d-4715-b142-276b6bce75ab", + "session_persistence": { + "cookie_name": "MyAppCookie", + "type": "APP_COOKIE" + }, + "address": "10.0.0.10", + "protocol_port": 80, + "port_id": "b5a743d6-056b-468b-862d-fb13a9aa694e", + "id": "4ec89087-d057-4e2c-911f-60a3b47ee304", + "name": "my-vip" + } +} + `) + }) + + vip, err := vips.Get(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "ACTIVE", vip.Status) + th.AssertEquals(t, "HTTP", vip.Protocol) + th.AssertEquals(t, "", vip.Description) + th.AssertEquals(t, true, vip.AdminStateUp) + th.AssertEquals(t, 1000, vip.ConnLimit) + th.AssertEquals(t, vips.SessionPersistence{Type: "APP_COOKIE", CookieName: "MyAppCookie"}, vip.Persistence) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "vip": { + "connection_limit": 1000, + "session_persistence": {"type": "SOURCE_IP"} + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` +{ + "vip": { + "status": "PENDING_UPDATE", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "subnet_id": "8032909d-47a1-4715-90af-5153ffe39861", + "tenant_id": 
"83657cfcdfe44cd5920adaf26c48ceea", + "connection_limit": 1000, + "pool_id": "61b1f87a-7a21-4ad3-9dda-7f81d249944f", + "address": "10.0.0.11", + "protocol_port": 80, + "port_id": "f7e6fe6a-b8b5-43a8-8215-73456b32e0f5", + "id": "c987d2be-9a3c-4ac9-a046-e8716b1350e2", + "name": "NewVip" + } +} + `) + }) + + i1000 := 1000 + options := vips.UpdateOpts{ + ConnLimit: &i1000, + Persistence: &vips.SessionPersistence{Type: "SOURCE_IP"}, + } + vip, err := vips.Update(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "PENDING_UPDATE", vip.Status) + th.AssertEquals(t, 1000, vip.ConnLimit) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/lb/vips/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := vips.Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go new file mode 100644 index 000000000000..584a1cf680c9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas/vips/urls.go @@ -0,0 +1,16 @@ +package vips + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lb" + resourcePath = "vips" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/doc.go new file mode 100644 index 000000000000..ec7f9d6f049a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/doc.go @@ -0,0 +1,3 @@ +// Package lbaas_v2 provides information and interaction with the Load Balancer +// as a Service v2 extension for the OpenStack Networking service. +package lbaas_v2 diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go new file mode 100644 index 000000000000..108cdb03d8bf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/doc.go @@ -0,0 +1,63 @@ +/* +Package listeners provides information and interaction with Listeners of the +LBaaS v2 extension for the OpenStack Networking service. 
+ +Example to List Listeners + + listOpts := listeners.ListOpts{ + LoadbalancerID : "ca430f80-1737-4712-8dc6-3f640d55594b", + } + + allPages, err := listeners.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allListeners, err := listeners.ExtractListeners(allPages) + if err != nil { + panic(err) + } + + for _, listener := range allListeners { + fmt.Printf("%+v\n", listener) + } + +Example to Create a Listener + + createOpts := listeners.CreateOpts{ + Protocol: "TCP", + Name: "db", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + AdminStateUp: gophercloud.Enabled, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ProtocolPort: 3306, + } + + listener, err := listeners.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + + i1001 := 1001 + updateOpts := listeners.UpdateOpts{ + ConnLimit: &i1001, + } + + listener, err := listeners.Update(networkClient, listenerID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Listener + + listenerID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := listeners.Delete(networkClient, listenerID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package listeners diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go new file mode 100644 index 000000000000..dd190f606f20 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/requests.go @@ -0,0 +1,199 @@ +package listeners + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Type Protocol represents a listener protocol. +type Protocol string + +// Supported attributes for create/update operations. +const ( + ProtocolTCP Protocol = "TCP" + ProtocolHTTP Protocol = "HTTP" + ProtocolHTTPS Protocol = "HTTPS" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToListenerListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the floating IP attributes you want to see returned. SortKey allows you to +// sort by a particular listener attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + LoadbalancerID string `q:"loadbalancer_id"` + DefaultPoolID string `q:"default_pool_id"` + Protocol string `q:"protocol"` + ProtocolPort int `q:"protocol_port"` + ConnectionLimit int `q:"connection_limit"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToListenerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToListenerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// listeners. 
It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those listeners that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToListenerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ListenerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToListenerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options for creating a listener. +type CreateOpts struct { + // The load balancer on which to provision this listener. + LoadbalancerID string `json:"loadbalancer_id" required:"true"` + + // The protocol - can either be TCP, HTTP or HTTPS. + Protocol Protocol `json:"protocol" required:"true"` + + // The port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // TenantID is only required if the caller has an admin role and wants + // to create a pool for another project. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is only required if the caller has an admin role and wants + // to create a pool for another project. + ProjectID string `json:"project_id,omitempty"` + + // Human-readable name for the Listener. Does not have to be unique. + Name string `json:"name,omitempty"` + + // The ID of the default pool with which the Listener is associated. + DefaultPoolID string `json:"default_pool_id,omitempty"` + + // Human-readable description for the Listener. + Description string `json:"description,omitempty"` + + // The maximum number of connections allowed for the Listener. + ConnLimit *int `json:"connection_limit,omitempty"` + + // A reference to a Barbican container of TLS secrets. + DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"` + + // A list of references to TLS secrets. + SniContainerRefs []string `json:"sni_container_refs,omitempty"` + + // The administrative state of the Listener. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToListenerCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToListenerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "listener") +} + +// Create is an operation which provisions a new Listeners based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. +// +// Users with an admin role can create Listeners on behalf of other tenants by +// specifying a TenantID attribute different than their own. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToListenerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular Listeners based on its unique ID. 
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToListenerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options for updating a Listener. +type UpdateOpts struct { + // Human-readable name for the Listener. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Listener. + Description string `json:"description,omitempty"` + + // The maximum number of connections allowed for the Listener. + ConnLimit *int `json:"connection_limit,omitempty"` + + // A reference to a Barbican container of TLS secrets. + DefaultTlsContainerRef string `json:"default_tls_container_ref,omitempty"` + + // A list of references to TLS secrets. + SniContainerRefs []string `json:"sni_container_refs,omitempty"` + + // The administrative state of the Listener. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToListenerUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToListenerUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "listener") +} + +// Update is an operation which modifies the attributes of the specified +// Listener. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { + b, err := opts.ToListenerUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular Listeners based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go new file mode 100644 index 000000000000..81cda90361fe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/results.go @@ -0,0 +1,135 @@ +package listeners + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" + "github.com/gophercloud/gophercloud/pagination" +) + +type LoadBalancerID struct { + ID string `json:"id"` +} + +// Listener is the primary load balancing configuration object that specifies +// the loadbalancer and port on which client traffic is received, as well +// as other details such as the load balancing method to be use, protocol, etc. +type Listener struct { + // The unique ID for the Listener. + ID string `json:"id"` + + // Owner of the Listener. + TenantID string `json:"tenant_id"` + + // Human-readable name for the Listener. Does not have to be unique. + Name string `json:"name"` + + // Human-readable description for the Listener. + Description string `json:"description"` + + // The protocol to loadbalance. A valid value is TCP, HTTP, or HTTPS. + Protocol string `json:"protocol"` + + // The port on which to listen to client traffic that is associated with the + // Loadbalancer. A valid value is from 0 to 65535. + ProtocolPort int `json:"protocol_port"` + + // The UUID of default pool. 
Must have compatible protocol with listener. + DefaultPoolID string `json:"default_pool_id"` + + // A list of load balancer IDs. + Loadbalancers []LoadBalancerID `json:"loadbalancers"` + + // The maximum number of connections allowed for the Loadbalancer. + // Default is -1, meaning no limit. + ConnLimit int `json:"connection_limit"` + + // The list of references to TLS secrets. + SniContainerRefs []string `json:"sni_container_refs"` + + // A reference to a Barbican container of TLS secrets. + DefaultTlsContainerRef string `json:"default_tls_container_ref"` + + // The administrative state of the Listener. A valid value is true (UP) or false (DOWN). + AdminStateUp bool `json:"admin_state_up"` + + // Pools are the pools which are part of this listener. + Pools []pools.Pool `json:"pools"` + + // The provisioning status of the listener. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// ListenerPage is the page returned by a pager when traversing over a +// collection of listeners. +type ListenerPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of listeners has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r ListenerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"listeners_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a ListenerPage struct is empty. +func (r ListenerPage) IsEmpty() (bool, error) { + is, err := ExtractListeners(r) + return len(is) == 0, err +} + +// ExtractListeners accepts a Page struct, specifically a ListenerPage struct, +// and extracts the elements into a slice of Listener structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractListeners(r pagination.Page) ([]Listener, error) { + var s struct { + Listeners []Listener `json:"listeners"` + } + err := (r.(ListenerPage)).ExtractInto(&s) + return s.Listeners, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a listener. +func (r commonResult) Extract() (*Listener, error) { + var s struct { + Listener *Listener `json:"listener"` + } + err := r.ExtractInto(&s) + return s.Listener, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Listener. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Listener. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Listener. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/doc.go new file mode 100644 index 000000000000..f41387e827c5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/doc.go @@ -0,0 +1,2 @@ +// listeners unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/fixtures.go new file mode 100644 index 000000000000..fa4fa25c1804 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/fixtures.go @@ -0,0 +1,213 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// ListenersListBody contains the canned body of a listeners list response. +const ListenersListBody = ` +{ + "listeners":[ + { + "id": "db902c0c-d5ff-4753-b465-668ad9656918", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "web", + "description": "listener config for the web tier", + "loadbalancers": [{"id": "53306cda-815d-4354-9444-59e09da9c3c5"}], + "protocol": "HTTP", + "protocol_port": 80, + "default_pool_id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + }, + { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "db", + "description": "listener config for the db tier", + "loadbalancers": [{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "protocol": "TCP", + "protocol_port": 3306, + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "connection_limit": 2000, + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + } + ] +} +` + +// SingleServerBody is the canned body of a Get request on an existing listener. +const SingleListenerBody = ` +{ + "listener": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "db", + "description": "listener config for the db tier", + "loadbalancers": [{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "protocol": "TCP", + "protocol_port": 3306, + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "connection_limit": 2000, + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + } +} +` + +// PostUpdateListenerBody is the canned response body of a Update request on an existing listener. 
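+// The values here line up with the ListenerUpdated fixture declared further
+// down in this file (name "NewListenerName", connection_limit 1000).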
+const PostUpdateListenerBody = ` +{ + "listener": { + "id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + "tenant_id": "310df60f-2a10-4ee5-9554-98393092194c", + "name": "NewListenerName", + "description": "listener config for the db tier", + "loadbalancers": [{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "protocol": "TCP", + "protocol_port": 3306, + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "connection_limit": 1000, + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "sni_container_refs": ["3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"] + } +} +` + +var ( + ListenerWeb = listeners.Listener{ + ID: "db902c0c-d5ff-4753-b465-668ad9656918", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "web", + Description: "listener config for the web tier", + Loadbalancers: []listeners.LoadBalancerID{{ID: "53306cda-815d-4354-9444-59e09da9c3c5"}}, + Protocol: "HTTP", + ProtocolPort: 80, + DefaultPoolID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + AdminStateUp: true, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + SniContainerRefs: []string{"3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"}, + } + ListenerDb = listeners.Listener{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "db", + Description: "listener config for the db tier", + Loadbalancers: []listeners.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Protocol: "TCP", + ProtocolPort: 3306, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ConnLimit: 2000, + AdminStateUp: true, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + SniContainerRefs: []string{"3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"}, + } + ListenerUpdated = listeners.Listener{ + ID: "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", + TenantID: "310df60f-2a10-4ee5-9554-98393092194c", + Name: "NewListenerName", + Description: "listener config for the db tier", + Loadbalancers: []listeners.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Protocol: "TCP", + ProtocolPort: 3306, + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ConnLimit: 1000, + AdminStateUp: true, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + SniContainerRefs: []string{"3d328d82-2547-4921-ac2f-61c3b452b5ff", "b3cfd7e3-8c19-455c-8ebb-d78dfd8f7e7d"}, + } +) + +// HandleListenerListSuccessfully sets up the test server to respond to a listener List request. +func HandleListenerListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, ListenersListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "listeners": [] }`) + default: + t.Fatalf("/v2.0/lbaas/listeners invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleListenerCreationSuccessfully sets up the test server to respond to a listener creation request +// with a given response. 
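+// A typical call from a test (sketch) passes one of the canned bodies above,
+// e.g. HandleListenerCreationSuccessfully(t, SingleListenerBody).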
+func HandleListenerCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "listener": { + "loadbalancer_id": "79e05663-7f03-45d2-a092-8b94062f22ab", + "protocol": "TCP", + "name": "db", + "admin_state_up": true, + "default_tls_container_ref": "2c433435-20de-4411-84ae-9cc8917def76", + "default_pool_id": "41efe233-7591-43c5-9cf7-923964759f9e", + "protocol_port": 3306 + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleListenerGetSuccessfully sets up the test server to respond to a listener Get request. +func HandleListenerGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleListenerBody) + }) +} + +// HandleListenerDeletionSuccessfully sets up the test server to respond to a listener deletion request. +func HandleListenerDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleListenerUpdateSuccessfully sets up the test server to respond to a listener Update request. +func HandleListenerUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/listeners/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "listener": { + "name": "NewListenerName", + "connection_limit": 1001 + } + }`) + + fmt.Fprintf(w, PostUpdateListenerBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/requests_test.go new file mode 100644 index 000000000000..d463f6e85922 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/testing/requests_test.go @@ -0,0 +1,137 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListListeners(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerListSuccessfully(t) + + pages := 0 + err := listeners.List(fake.ServiceClient(), listeners.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := listeners.ExtractListeners(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 listeners, got %d", len(actual)) + } + th.CheckDeepEquals(t, ListenerWeb, 
actual[0]) + th.CheckDeepEquals(t, ListenerDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllListeners(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerListSuccessfully(t) + + allPages, err := listeners.List(fake.ServiceClient(), listeners.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := listeners.ExtractListeners(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ListenerWeb, actual[0]) + th.CheckDeepEquals(t, ListenerDb, actual[1]) +} + +func TestCreateListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerCreationSuccessfully(t, SingleListenerBody) + + actual, err := listeners.Create(fake.ServiceClient(), listeners.CreateOpts{ + Protocol: "TCP", + Name: "db", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + AdminStateUp: gophercloud.Enabled, + DefaultTlsContainerRef: "2c433435-20de-4411-84ae-9cc8917def76", + DefaultPoolID: "41efe233-7591-43c5-9cf7-923964759f9e", + ProtocolPort: 3306, + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListenerDb, *actual) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := listeners.Create(fake.ServiceClient(), listeners.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo", TenantID: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo", TenantID: "bar", Protocol: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = listeners.Create(fake.ServiceClient(), listeners.CreateOpts{Name: "foo", TenantID: "bar", Protocol: "bar", ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGetListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := listeners.Get(client, "4ec89087-d057-4e2c-911f-60a3b47ee304").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, ListenerDb, *actual) +} + +func TestDeleteListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerDeletionSuccessfully(t) + + res := listeners.Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateListener(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListenerUpdateSuccessfully(t) + + client := fake.ServiceClient() + i1001 := 1001 + actual, err := listeners.Update(client, "4ec89087-d057-4e2c-911f-60a3b47ee304", listeners.UpdateOpts{ + Name: "NewListenerName", + ConnLimit: &i1001, + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, ListenerUpdated, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go new file mode 100644 index 000000000000..02fb1eb39ec6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners/urls.go @@ -0,0 +1,16 
@@
+package listeners
+
+import "github.com/gophercloud/gophercloud"
+
+const (
+	rootPath     = "lbaas"
+	resourcePath = "listeners"
+)
+
+func rootURL(c *gophercloud.ServiceClient) string {
+	return c.ServiceURL(rootPath, resourcePath)
+}
+
+func resourceURL(c *gophercloud.ServiceClient, id string) string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go
new file mode 100644
index 000000000000..eea43391a8cf
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/doc.go
@@ -0,0 +1,70 @@
+/*
+Package loadbalancers provides information and interaction with Load Balancers
+of the LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Load Balancers
+
+	listOpts := loadbalancers.ListOpts{
+		Provider: "haproxy",
+	}
+
+	allPages, err := loadbalancers.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allLoadbalancers, err := loadbalancers.ExtractLoadBalancers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, lb := range allLoadbalancers {
+		fmt.Printf("%+v\n", lb)
+	}
+
+Example to Create a Load Balancer
+
+	createOpts := loadbalancers.CreateOpts{
+		Name:         "db_lb",
+		AdminStateUp: gophercloud.Enabled,
+		VipSubnetID:  "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+		VipAddress:   "10.30.176.48",
+		Flavor:       "medium",
+		Provider:     "haproxy",
+	}
+
+	lb, err := loadbalancers.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Load Balancer
+
+	lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	updateOpts := loadbalancers.UpdateOpts{
+		Name: "new-name",
+	}
+
+	lb, err := loadbalancers.Update(networkClient, lbID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Load Balancer
+
+	lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := loadbalancers.Delete(networkClient, lbID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Get the Status of a Load Balancer
+
+	lbID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	status, err := loadbalancers.GetStatuses(networkClient, lbID).Extract()
+	if err != nil {
+		panic(err)
+	}
+*/
+package loadbalancers
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go
new file mode 100644
index 000000000000..1ed23c3c82ae
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/requests.go
@@ -0,0 +1,198 @@
+package loadbalancers
+
+import (
+	"fmt"
+
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToLoadBalancerListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the Loadbalancer attributes you want to see returned. SortKey allows you to
+// sort by a particular attribute. SortDir sets the direction, and is
+// either `asc' or `desc'. Marker and Limit are used for pagination.
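+//
+// A filtering sketch (assuming networkClient is an authenticated
+// *gophercloud.ServiceClient; the values are illustrative):
+//
+//	opts := loadbalancers.ListOpts{
+//		ProvisioningStatus: "ACTIVE",
+//		Provider:           "haproxy",
+//	}
+//	pager := loadbalancers.List(networkClient, opts)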
+type ListOpts struct { + Description string `q:"description"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + ProvisioningStatus string `q:"provisioning_status"` + VipAddress string `q:"vip_address"` + VipPortID string `q:"vip_port_id"` + VipSubnetID string `q:"vip_subnet_id"` + ID string `q:"id"` + OperatingStatus string `q:"operating_status"` + Name string `q:"name"` + Flavor string `q:"flavor"` + Provider string `q:"provider"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToLoadBalancerListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToLoadBalancerListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// load balancers. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +// +// Default policy settings return only those load balancers that are owned by +// the tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToLoadBalancerListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return LoadBalancerPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToLoadBalancerCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // Human-readable name for the Loadbalancer. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Loadbalancer. + Description string `json:"description,omitempty"` + + // The network on which to allocate the Loadbalancer's address. A tenant can + // only create Loadbalancers on networks authorized by policy (e.g. networks + // that belong to them or networks that are shared). + VipSubnetID string `json:"vip_subnet_id" required:"true"` + + // TenantID is the UUID of the project who owns the Loadbalancer. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Loadbalancer. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // The IP address of the Loadbalancer. + VipAddress string `json:"vip_address,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` + + // The UUID of a flavor. + Flavor string `json:"flavor,omitempty"` + + // The name of the provider. + Provider string `json:"provider,omitempty"` +} + +// ToLoadBalancerCreateMap builds a request body from CreateOpts. 
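+// As a sketch, CreateOpts{Name: "db_lb", VipSubnetID: "subnet-uuid"} is wrapped
+// under a top-level "loadbalancer" key, roughly:
+//
+//	map[string]interface{}{
+//		"loadbalancer": map[string]interface{}{
+//			"name":          "db_lb",
+//			"vip_subnet_id": "subnet-uuid",
+//		},
+//	}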
+func (opts CreateOpts) ToLoadBalancerCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "loadbalancer") +} + +// Create is an operation which provisions a new loadbalancer based on the +// configuration defined in the CreateOpts struct. Once the request is +// validated and progress has started on the provisioning process, a +// CreateResult will be returned. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToLoadBalancerCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular Loadbalancer based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToLoadBalancerUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Human-readable name for the Loadbalancer. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Human-readable description for the Loadbalancer. + Description string `json:"description,omitempty"` + + // The administrative state of the Loadbalancer. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToLoadBalancerUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToLoadBalancerUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "loadbalancer") +} + +// Update is an operation which modifies the attributes of the specified +// LoadBalancer. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOpts) (r UpdateResult) { + b, err := opts.ToLoadBalancerUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular LoadBalancer based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// CascadingDelete is like `Delete`, but will also delete any of the load balancer's +// children (listener, monitor, etc). +// NOTE: This function will only work with Octavia load balancers; Neutron does not +// support this. +func CascadingDelete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + if c.Type != "load-balancer" { + r.Err = fmt.Errorf("error prior to running cascade delete: only Octavia LBs supported") + return + } + u := fmt.Sprintf("%s?cascade=true", resourceURL(c, id)) + _, r.Err = c.Delete(u, nil) + return +} + +// GetStatuses will return the status of a particular LoadBalancer. 
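+// A usage sketch (assuming networkClient is an authenticated
+// *gophercloud.ServiceClient and lbID holds an existing load balancer ID):
+//
+//	tree, err := loadbalancers.GetStatuses(networkClient, lbID).Extract()
+//	if err != nil {
+//		// handle the error
+//	}
+//	for _, l := range tree.Loadbalancer.Listeners {
+//		fmt.Println(l.ID, l.ProvisioningStatus)
+//	}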
+func GetStatuses(c *gophercloud.ServiceClient, id string) (r GetStatusesResult) { + _, r.Err = c.Get(statusRootURL(c, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go new file mode 100644 index 000000000000..42fff5713198 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/results.go @@ -0,0 +1,149 @@ +package loadbalancers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners" + "github.com/gophercloud/gophercloud/pagination" +) + +// LoadBalancer is the primary load balancing configuration object that +// specifies the virtual IP address on which client traffic is received, as well +// as other details such as the load balancing method to be use, protocol, etc. +type LoadBalancer struct { + // Human-readable description for the Loadbalancer. + Description string `json:"description"` + + // The administrative state of the Loadbalancer. + // A valid value is true (UP) or false (DOWN). + AdminStateUp bool `json:"admin_state_up"` + + // Owner of the LoadBalancer. + TenantID string `json:"tenant_id"` + + // The provisioning status of the LoadBalancer. + // This value is ACTIVE, PENDING_CREATE or ERROR. + ProvisioningStatus string `json:"provisioning_status"` + + // The IP address of the Loadbalancer. + VipAddress string `json:"vip_address"` + + // The UUID of the port associated with the IP address. + VipPortID string `json:"vip_port_id"` + + // The UUID of the subnet on which to allocate the virtual IP for the + // Loadbalancer address. + VipSubnetID string `json:"vip_subnet_id"` + + // The unique ID for the LoadBalancer. + ID string `json:"id"` + + // The operating status of the LoadBalancer. This value is ONLINE or OFFLINE. + OperatingStatus string `json:"operating_status"` + + // Human-readable name for the LoadBalancer. Does not have to be unique. + Name string `json:"name"` + + // The UUID of a flavor if set. + Flavor string `json:"flavor"` + + // The name of the provider. + Provider string `json:"provider"` + + // Listeners are the listeners related to this Loadbalancer. + Listeners []listeners.Listener `json:"listeners"` +} + +// StatusTree represents the status of a loadbalancer. +type StatusTree struct { + Loadbalancer *LoadBalancer `json:"loadbalancer"` +} + +// LoadBalancerPage is the page returned by a pager when traversing over a +// collection of load balancers. +type LoadBalancerPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of load balancers has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r LoadBalancerPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"loadbalancers_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a LoadBalancerPage struct is empty. 
+func (r LoadBalancerPage) IsEmpty() (bool, error) { + is, err := ExtractLoadBalancers(r) + return len(is) == 0, err +} + +// ExtractLoadBalancers accepts a Page struct, specifically a LoadbalancerPage +// struct, and extracts the elements into a slice of LoadBalancer structs. In +// other words, a generic collection is mapped into a relevant slice. +func ExtractLoadBalancers(r pagination.Page) ([]LoadBalancer, error) { + var s struct { + LoadBalancers []LoadBalancer `json:"loadbalancers"` + } + err := (r.(LoadBalancerPage)).ExtractInto(&s) + return s.LoadBalancers, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a loadbalancer. +func (r commonResult) Extract() (*LoadBalancer, error) { + var s struct { + LoadBalancer *LoadBalancer `json:"loadbalancer"` + } + err := r.ExtractInto(&s) + return s.LoadBalancer, err +} + +// GetStatusesResult represents the result of a GetStatuses operation. +// Call its Extract method to interpret it as a StatusTree. +type GetStatusesResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts the status of +// a Loadbalancer. +func (r GetStatusesResult) Extract() (*StatusTree, error) { + var s struct { + Statuses *StatusTree `json:"statuses"` + } + err := r.ExtractInto(&s) + return s.Statuses, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a LoadBalancer. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a LoadBalancer. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a LoadBalancer. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
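+//
+// Illustrative usage sketch (hedged; networkClient is assumed to be an
+// authenticated *gophercloud.ServiceClient, as in the doc.go examples):
+//
+//	err := loadbalancers.Delete(networkClient, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").ExtractErr()
+//	if err != nil {
+//		panic(err)
+//	}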
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/doc.go
new file mode 100644
index 000000000000..b54468c82f15
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/doc.go
@@ -0,0 +1,2 @@
+// loadbalancers unit tests
+package testing
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/fixtures.go
new file mode 100644
index 000000000000..2bc949b9261f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/fixtures.go
@@ -0,0 +1,292 @@
+package testing
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	th "github.com/gophercloud/gophercloud/testhelper"
+	"github.com/gophercloud/gophercloud/testhelper/client"
+
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/listeners"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools"
+)
+
+// LoadbalancersListBody contains the canned body of a loadbalancer list response.
+const LoadbalancersListBody = `
+{
+	"loadbalancers":[
+		{
+			"id": "c331058c-6a40-4144-948e-b9fb1df9db4b",
+			"tenant_id": "54030507-44f7-473c-9342-b4d14a95f692",
+			"name": "web_lb",
+			"description": "lb config for the web tier",
+			"vip_subnet_id": "8a49c438-848f-467b-9655-ea1548708154",
+			"vip_address": "10.30.176.47",
+			"vip_port_id": "2a22e552-a347-44fd-b530-1f2b1b2a6735",
+			"flavor": "small",
+			"provider": "haproxy",
+			"admin_state_up": true,
+			"provisioning_status": "ACTIVE",
+			"operating_status": "ONLINE"
+		},
+		{
+			"id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+			"tenant_id": "54030507-44f7-473c-9342-b4d14a95f692",
+			"name": "db_lb",
+			"description": "lb config for the db tier",
+			"vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+			"vip_address": "10.30.176.48",
+			"vip_port_id": "2bf413c8-41a9-4477-b505-333d5cbe8b55",
+			"flavor": "medium",
+			"provider": "haproxy",
+			"admin_state_up": true,
+			"provisioning_status": "PENDING_CREATE",
+			"operating_status": "OFFLINE"
+		}
+	]
+}
+`
+
+// SingleLoadbalancerBody is the canned body of a Get request on an existing loadbalancer.
+const SingleLoadbalancerBody = `
+{
+	"loadbalancer": {
+		"id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+		"tenant_id": "54030507-44f7-473c-9342-b4d14a95f692",
+		"name": "db_lb",
+		"description": "lb config for the db tier",
+		"vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+		"vip_address": "10.30.176.48",
+		"vip_port_id": "2bf413c8-41a9-4477-b505-333d5cbe8b55",
+		"flavor": "medium",
+		"provider": "haproxy",
+		"admin_state_up": true,
+		"provisioning_status": "PENDING_CREATE",
+		"operating_status": "OFFLINE"
+	}
+}
+`
+
+// PostUpdateLoadbalancerBody is the canned response body of an Update request on an existing loadbalancer.
+const PostUpdateLoadbalancerBody = `
+{
+	"loadbalancer": {
+		"id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+		"tenant_id": "54030507-44f7-473c-9342-b4d14a95f692",
+		"name": "NewLoadbalancerName",
+		"description": "lb config for the db tier",
+		"vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+		"vip_address": "10.30.176.48",
+		"vip_port_id": "2bf413c8-41a9-4477-b505-333d5cbe8b55",
+		"flavor": "medium",
+		"provider": "haproxy",
+		"admin_state_up": true,
+		"provisioning_status": "PENDING_CREATE",
+		"operating_status": "OFFLINE"
+	}
+}
+`
+
+// LoadbalancerStatuesesTree is the canned body of a Get statuses tree request on an existing loadbalancer.
+const LoadbalancerStatuesesTree = `
+{
+	"statuses" : {
+		"loadbalancer": {
+			"id": "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+			"name": "db_lb",
+			"provisioning_status": "PENDING_UPDATE",
+			"operating_status": "ACTIVE",
+			"listeners": [{
+				"id": "db902c0c-d5ff-4753-b465-668ad9656918",
+				"name": "db",
+				"provisioning_status": "PENDING_UPDATE",
+				"pools": [{
+					"id": "fad389a3-9a4a-4762-a365-8c7038508b5d",
+					"name": "db",
+					"provisioning_status": "PENDING_UPDATE",
+					"healthmonitor": {
+						"id": "67306cda-815d-4354-9fe4-59e09da9c3c5",
+						"type":"PING",
+						"provisioning_status": "PENDING_UPDATE"
+					},
+					"members":[{
+						"id": "2a280670-c202-4b0b-a562-34077415aabf",
+						"name": "db",
+						"address": "10.0.2.11",
+						"protocol_port": 80,
+						"provisioning_status": "PENDING_UPDATE"
+					}]
+				}]
+			}]
+		}
+	}
+}
+`
+
+var (
+	LoadbalancerWeb = loadbalancers.LoadBalancer{
+		ID:                 "c331058c-6a40-4144-948e-b9fb1df9db4b",
+		TenantID:           "54030507-44f7-473c-9342-b4d14a95f692",
+		Name:               "web_lb",
+		Description:        "lb config for the web tier",
+		VipSubnetID:        "8a49c438-848f-467b-9655-ea1548708154",
+		VipAddress:         "10.30.176.47",
+		VipPortID:          "2a22e552-a347-44fd-b530-1f2b1b2a6735",
+		Flavor:             "small",
+		Provider:           "haproxy",
+		AdminStateUp:       true,
+		ProvisioningStatus: "ACTIVE",
+		OperatingStatus:    "ONLINE",
+	}
+	LoadbalancerDb = loadbalancers.LoadBalancer{
+		ID:                 "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+		TenantID:           "54030507-44f7-473c-9342-b4d14a95f692",
+		Name:               "db_lb",
+		Description:        "lb config for the db tier",
+		VipSubnetID:        "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+		VipAddress:         "10.30.176.48",
+		VipPortID:          "2bf413c8-41a9-4477-b505-333d5cbe8b55",
+		Flavor:             "medium",
+		Provider:           "haproxy",
+		AdminStateUp:       true,
+		ProvisioningStatus: "PENDING_CREATE",
+		OperatingStatus:    "OFFLINE",
+	}
+	LoadbalancerUpdated = loadbalancers.LoadBalancer{
+		ID:                 "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+		TenantID:           "54030507-44f7-473c-9342-b4d14a95f692",
+		Name:               "NewLoadbalancerName",
+		Description:        "lb config for the db tier",
+		VipSubnetID:        "9cedb85d-0759-4898-8a4b-fa5a5ea10086",
+		VipAddress:         "10.30.176.48",
+		VipPortID:          "2bf413c8-41a9-4477-b505-333d5cbe8b55",
+		Flavor:             "medium",
+		Provider:           "haproxy",
+		AdminStateUp:       true,
+		ProvisioningStatus: "PENDING_CREATE",
+		OperatingStatus:    "OFFLINE",
+	}
+	LoadbalancerStatusesTree = loadbalancers.LoadBalancer{
+		ID:                 "36e08a3e-a78f-4b40-a229-1e7e23eee1ab",
+		Name:               "db_lb",
+		ProvisioningStatus: "PENDING_UPDATE",
+		OperatingStatus:    "ACTIVE",
+		Listeners: []listeners.Listener{{
+			ID:                 "db902c0c-d5ff-4753-b465-668ad9656918",
+			Name:               "db",
+			ProvisioningStatus: "PENDING_UPDATE",
+			Pools: []pools.Pool{{
+				ID:                 "fad389a3-9a4a-4762-a365-8c7038508b5d",
+				Name:               "db",
+				ProvisioningStatus: "PENDING_UPDATE",
+				Monitor: monitors.Monitor{
+					ID:                 "67306cda-815d-4354-9fe4-59e09da9c3c5",
+					Type:               "PING",
+					ProvisioningStatus: "PENDING_UPDATE",
+				},
+				Members: []pools.Member{{
+					ID:
"2a280670-c202-4b0b-a562-34077415aabf", + Name: "db", + Address: "10.0.2.11", + ProtocolPort: 80, + ProvisioningStatus: "PENDING_UPDATE", + }}, + }}, + }}, + } +) + +// HandleLoadbalancerListSuccessfully sets up the test server to respond to a loadbalancer List request. +func HandleLoadbalancerListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, LoadbalancersListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "loadbalancers": [] }`) + default: + t.Fatalf("/v2.0/lbaas/loadbalancers invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleLoadbalancerCreationSuccessfully sets up the test server to respond to a loadbalancer creation request +// with a given response. +func HandleLoadbalancerCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "loadbalancer": { + "name": "db_lb", + "vip_subnet_id": "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + "vip_address": "10.30.176.48", + "flavor": "medium", + "provider": "haproxy", + "admin_state_up": true + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleLoadbalancerGetSuccessfully sets up the test server to respond to a loadbalancer Get request. +func HandleLoadbalancerGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleLoadbalancerBody) + }) +} + +// HandleLoadbalancerGetStatusesTree sets up the test server to respond to a loadbalancer Get statuses tree request. +func HandleLoadbalancerGetStatusesTree(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab/statuses", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, LoadbalancerStatuesesTree) + }) +} + +// HandleLoadbalancerDeletionSuccessfully sets up the test server to respond to a loadbalancer deletion request. +func HandleLoadbalancerDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleLoadbalancerUpdateSuccessfully sets up the test server to respond to a loadbalancer Update request. 
+func HandleLoadbalancerUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/loadbalancers/36e08a3e-a78f-4b40-a229-1e7e23eee1ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "loadbalancer": { + "name": "NewLoadbalancerName" + } + }`) + + fmt.Fprintf(w, PostUpdateLoadbalancerBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/requests_test.go new file mode 100644 index 000000000000..e370c669bfdc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/testing/requests_test.go @@ -0,0 +1,161 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListLoadbalancers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerListSuccessfully(t) + + pages := 0 + err := loadbalancers.List(fake.ServiceClient(), loadbalancers.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := loadbalancers.ExtractLoadBalancers(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 loadbalancers, got %d", len(actual)) + } + th.CheckDeepEquals(t, LoadbalancerWeb, actual[0]) + th.CheckDeepEquals(t, LoadbalancerDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllLoadbalancers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerListSuccessfully(t) + + allPages, err := loadbalancers.List(fake.ServiceClient(), loadbalancers.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := loadbalancers.ExtractLoadBalancers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, LoadbalancerWeb, actual[0]) + th.CheckDeepEquals(t, LoadbalancerDb, actual[1]) +} + +func TestCreateLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerCreationSuccessfully(t, SingleLoadbalancerBody) + + actual, err := loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{ + Name: "db_lb", + AdminStateUp: gophercloud.Enabled, + VipSubnetID: "9cedb85d-0759-4898-8a4b-fa5a5ea10086", + VipAddress: "10.30.176.48", + Flavor: "medium", + Provider: "haproxy", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, LoadbalancerDb, *actual) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{Name: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{Name: "foo", Description: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, 
got none") + } + res = loadbalancers.Create(fake.ServiceClient(), loadbalancers.CreateOpts{Name: "foo", Description: "bar", VipAddress: "bar"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGetLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.Get(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerDb, *actual) +} + +func TestGetLoadbalancerStatusesTree(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerGetStatusesTree(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.GetStatuses(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerStatusesTree, *(actual.Loadbalancer)) +} + +func TestDeleteLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerDeletionSuccessfully(t) + + res := loadbalancers.Delete(fake.ServiceClient(), "36e08a3e-a78f-4b40-a229-1e7e23eee1ab") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := loadbalancers.Update(client, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab", loadbalancers.UpdateOpts{ + Name: "NewLoadbalancerName", + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, LoadbalancerUpdated, *actual) +} + +func TestCascadingDeleteLoadbalancer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleLoadbalancerDeletionSuccessfully(t) + + sc := fake.ServiceClient() + sc.Type = "network" + err := loadbalancers.CascadingDelete(sc, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").ExtractErr() + if err == nil { + t.Fatalf("expected error running CascadingDelete with Neutron service client but didn't get one") + } + + sc.Type = "load-balancer" + err = loadbalancers.CascadingDelete(sc, "36e08a3e-a78f-4b40-a229-1e7e23eee1ab").ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go new file mode 100644 index 000000000000..73cf5dc126a3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/loadbalancers/urls.go @@ -0,0 +1,21 @@ +package loadbalancers + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "loadbalancers" + statusPath = "statuses" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func statusRootURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id, statusPath) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go new file mode 100644 index 000000000000..6ed8c8fb5fff --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/doc.go @@ -0,0 +1,69 @@ +/* +Package monitors provides information and interaction with Monitors +of the LBaaS v2 extension for the OpenStack Networking service. + +Example to List Monitors + + listOpts := monitors.ListOpts{ + PoolID: "c79a4468-d788-410c-bf79-9a8ef6354852", + } + + allPages, err := monitors.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allMonitors, err := monitors.ExtractMonitors(allPages) + if err != nil { + panic(err) + } + + for _, monitor := range allMonitors { + fmt.Printf("%+v\n", monitor) + } + +Example to Create a Monitor + + createOpts := monitors.CreateOpts{ + Type: "HTTP", + Name: "db", + PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + } + + monitor, err := monitors.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Monitor + + monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" + + updateOpts := monitors.UpdateOpts{ + Name: "NewHealthmonitorName", + Delay: 3, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + } + + monitor, err := monitors.Update(networkClient, monitorID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Monitor + + monitorID := "d67d56a6-4a86-4688-a282-f46444705c64" + err := monitors.Delete(networkClient, monitorID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package monitors diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go new file mode 100644 index 000000000000..c173e1c64e70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/requests.go @@ -0,0 +1,257 @@ +package monitors + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToMonitorListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Monitor attributes you want to see returned. SortKey allows you to +// sort by a particular Monitor attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + PoolID string `q:"pool_id"` + Type string `q:"type"` + Delay int `q:"delay"` + Timeout int `q:"timeout"` + MaxRetries int `q:"max_retries"` + HTTPMethod string `q:"http_method"` + URLPath string `q:"url_path"` + ExpectedCodes string `q:"expected_codes"` + AdminStateUp *bool `q:"admin_state_up"` + Status string `q:"status"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToMonitorListQuery formats a ListOpts into a query string. 
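+//
+// Illustrative sketch (hedged): the exact encoding is delegated to
+// gophercloud.BuildQueryString, so the output shown below is an assumption
+// about its typical form rather than a guaranteed result:
+//
+//	opts := monitors.ListOpts{PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", Type: "HTTP"}
+//	query, _ := opts.ToMonitorListQuery()
+//	// query is expected to look like
+//	// "?pool_id=84f1b61f-58c4-45bf-a8a9-2dafb9e5214d&type=HTTP"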
+func (opts ListOpts) ToMonitorListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// health monitors. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those health monitors that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToMonitorListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return MonitorPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Constants that represent approved monitoring types. +const ( + TypePING = "PING" + TypeTCP = "TCP" + TypeHTTP = "HTTP" + TypeHTTPS = "HTTPS" +) + +var ( + errDelayMustGETimeout = fmt.Errorf("Delay must be greater than or equal to timeout") +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// List request. +type CreateOptsBuilder interface { + ToMonitorCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // The Pool to Monitor. + PoolID string `json:"pool_id" required:"true"` + + // The type of probe, which is PING, TCP, HTTP, or HTTPS, that is + // sent by the load balancer to verify the member state. + Type string `json:"type" required:"true"` + + // The time, in seconds, between sending probes to members. + Delay int `json:"delay" required:"true"` + + // Maximum number of seconds for a Monitor to wait for a ping reply + // before it times out. The value must be less than the delay value. + Timeout int `json:"timeout" required:"true"` + + // Number of permissible ping failures before changing the member's + // status to INACTIVE. Must be a number between 1 and 10. + MaxRetries int `json:"max_retries" required:"true"` + + // URI path that will be accessed if Monitor type is HTTP or HTTPS. + // Required for HTTP(S) types. + URLPath string `json:"url_path,omitempty"` + + // The HTTP method used for requests by the Monitor. If this attribute + // is not specified, it defaults to "GET". Required for HTTP(S) types. + HTTPMethod string `json:"http_method,omitempty"` + + // Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify + // a single status like "200", or a range like "200-202". Required for HTTP(S) + // types. + ExpectedCodes string `json:"expected_codes,omitempty"` + + // TenantID is the UUID of the project who owns the Monitor. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Monitor. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // The Name of the Monitor. + Name string `json:"name,omitempty"` + + // The administrative state of the Monitor. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMonitorCreateMap builds a request body from CreateOpts. 
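+//
+// Illustrative sketch (hedged) of the wrapped request body this produces for
+// an HTTP monitor; the rendered JSON shown in the trailing comment is an
+// assumption based on the field tags and the "healthmonitor" key passed to
+// BuildRequestBody:
+//
+//	opts := monitors.CreateOpts{
+//		PoolID:        "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d",
+//		Type:          monitors.TypeHTTP,
+//		Delay:         20,
+//		Timeout:       10,
+//		MaxRetries:    5,
+//		URLPath:       "/check",
+//		ExpectedCodes: "200-299",
+//	}
+//	b, _ := opts.ToMonitorCreateMap()
+//	// b is the map form of:
+//	// {"healthmonitor": {"pool_id": "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d",
+//	//  "type": "HTTP", "delay": 20, "timeout": 10, "max_retries": 5,
+//	//  "url_path": "/check", "expected_codes": "200-299"}}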
+func (opts CreateOpts) ToMonitorCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "healthmonitor")
+	if err != nil {
+		return nil, err
+	}
+
+	switch opts.Type {
+	case TypeHTTP, TypeHTTPS:
+		switch opts.URLPath {
+		case "":
+			return nil, fmt.Errorf("URLPath must be provided for HTTP and HTTPS")
+		}
+		switch opts.ExpectedCodes {
+		case "":
+			return nil, fmt.Errorf("ExpectedCodes must be provided for HTTP and HTTPS")
+		}
+	}
+
+	return b, nil
+}
+
+/*
+ Create is an operation which provisions a new Health Monitor. There are
+ different types of Monitor you can provision: PING, TCP or HTTP(S). Below
+ are examples of how to create each one.
+
+ Here is an example config struct to use when creating a PING or TCP Monitor:
+
+ CreateOpts{Type: TypePING, Delay: 20, Timeout: 10, MaxRetries: 3}
+ CreateOpts{Type: TypeTCP, Delay: 20, Timeout: 10, MaxRetries: 3}
+
+ Here is an example config struct to use when creating an HTTP(S) Monitor:
+
+ CreateOpts{Type: TypeHTTP, Delay: 20, Timeout: 10, MaxRetries: 3,
+ HTTPMethod: "HEAD", ExpectedCodes: "200", PoolID: "2c946bfc-1804-43ab-a2ff-58f6a762b505"}
+*/
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToMonitorCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Get retrieves a particular Health Monitor based on its unique ID.
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(resourceURL(c, id), &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToMonitorUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts is the common options struct used in this package's Update
+// operation.
+type UpdateOpts struct {
+	// The time, in seconds, between sending probes to members.
+	Delay int `json:"delay,omitempty"`
+
+	// Maximum number of seconds for a Monitor to wait for a ping reply
+	// before it times out. The value must be less than the delay value.
+	Timeout int `json:"timeout,omitempty"`
+
+	// Number of permissible ping failures before changing the member's
+	// status to INACTIVE. Must be a number between 1 and 10.
+	MaxRetries int `json:"max_retries,omitempty"`
+
+	// URI path that will be accessed if Monitor type is HTTP or HTTPS.
+	// Required for HTTP(S) types.
+	URLPath string `json:"url_path,omitempty"`
+
+	// The HTTP method used for requests by the Monitor. If this attribute
+	// is not specified, it defaults to "GET". Required for HTTP(S) types.
+	HTTPMethod string `json:"http_method,omitempty"`
+
+	// Expected HTTP codes for a passing HTTP(S) Monitor. You can either specify
+	// a single status like "200", or a range like "200-202". Required for HTTP(S)
+	// types.
+	ExpectedCodes string `json:"expected_codes,omitempty"`
+
+	// The Name of the Monitor.
+	Name string `json:"name,omitempty"`
+
+	// The administrative state of the Monitor. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMonitorUpdateMap builds a request body from UpdateOpts.
+func (opts UpdateOpts) ToMonitorUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "healthmonitor")
+}
+
+// Update is an operation which modifies the attributes of the specified
+// Monitor.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToMonitorUpdateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will permanently delete a particular Monitor based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go new file mode 100644 index 000000000000..a78f7aeb0ffa --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/results.go @@ -0,0 +1,153 @@ +package monitors + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type PoolID struct { + ID string `json:"id"` +} + +// Monitor represents a load balancer health monitor. A health monitor is used +// to determine whether or not back-end members of the VIP's pool are usable +// for processing a request. A pool can have several health monitors associated +// with it. There are different types of health monitors supported: +// +// PING: used to ping the members using ICMP. +// TCP: used to connect to the members using TCP. +// HTTP: used to send an HTTP request to the member. +// HTTPS: used to send a secure HTTP request to the member. +// +// When a pool has several monitors associated with it, each member of the pool +// is monitored by all these monitors. If any monitor declares the member as +// unhealthy, then the member status is changed to INACTIVE and the member +// won't participate in its pool's load balancing. In other words, ALL monitors +// must declare the member to be healthy for it to stay ACTIVE. +type Monitor struct { + // The unique ID for the Monitor. + ID string `json:"id"` + + // The Name of the Monitor. + Name string `json:"name"` + + // TenantID is the owner of the Monitor. + TenantID string `json:"tenant_id"` + + // The type of probe sent by the load balancer to verify the member state, + // which is PING, TCP, HTTP, or HTTPS. + Type string `json:"type"` + + // The time, in seconds, between sending probes to members. + Delay int `json:"delay"` + + // The maximum number of seconds for a monitor to wait for a connection to be + // established before it times out. This value must be less than the delay + // value. + Timeout int `json:"timeout"` + + // Number of allowed connection failures before changing the status of the + // member to INACTIVE. A valid value is from 1 to 10. + MaxRetries int `json:"max_retries"` + + // The HTTP method that the monitor uses for requests. + HTTPMethod string `json:"http_method"` + + // The HTTP path of the request sent by the monitor to test the health of a + // member. Must be a string beginning with a forward slash (/). + URLPath string `json:"url_path" ` + + // Expected HTTP codes for a passing HTTP(S) monitor. + ExpectedCodes string `json:"expected_codes"` + + // The administrative state of the health monitor, which is up (true) or + // down (false). + AdminStateUp bool `json:"admin_state_up"` + + // The status of the health monitor. Indicates whether the health monitor is + // operational. 
+ Status string `json:"status"` + + // List of pools that are associated with the health monitor. + Pools []PoolID `json:"pools"` + + // The provisioning status of the monitor. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// MonitorPage is the page returned by a pager when traversing over a +// collection of health monitors. +type MonitorPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of monitors has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MonitorPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"healthmonitors_links"` + } + + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MonitorPage struct is empty. +func (r MonitorPage) IsEmpty() (bool, error) { + is, err := ExtractMonitors(r) + return len(is) == 0, err +} + +// ExtractMonitors accepts a Page struct, specifically a MonitorPage struct, +// and extracts the elements into a slice of Monitor structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMonitors(r pagination.Page) ([]Monitor, error) { + var s struct { + Monitors []Monitor `json:"healthmonitors"` + } + err := (r.(MonitorPage)).ExtractInto(&s) + return s.Monitors, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a monitor. +func (r commonResult) Extract() (*Monitor, error) { + var s struct { + Monitor *Monitor `json:"healthmonitor"` + } + err := r.ExtractInto(&s) + return s.Monitor, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Monitor. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Monitor. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Monitor. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the result succeeded or failed. 
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/doc.go
new file mode 100644
index 000000000000..e2b6f12a9285
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/doc.go
@@ -0,0 +1,2 @@
+// monitors unit tests
+package testing
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/fixtures.go
new file mode 100644
index 000000000000..6d3eb01ee450
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/fixtures.go
@@ -0,0 +1,215 @@
+package testing
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	th "github.com/gophercloud/gophercloud/testhelper"
+	"github.com/gophercloud/gophercloud/testhelper/client"
+)
+
+// HealthmonitorsListBody contains the canned body of a healthmonitor list response.
+const HealthmonitorsListBody = `
+{
+	"healthmonitors":[
+		{
+			"admin_state_up":true,
+			"tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
+			"delay":10,
+			"name":"web",
+			"max_retries":1,
+			"timeout":1,
+			"type":"PING",
+			"pools": [{"id": "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d"}],
+			"id":"466c8345-28d8-4f84-a246-e04380b0461d"
+		},
+		{
+			"admin_state_up":true,
+			"tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
+			"delay":5,
+			"name":"db",
+			"expected_codes":"200",
+			"max_retries":2,
+			"http_method":"GET",
+			"timeout":2,
+			"url_path":"/",
+			"type":"HTTP",
+			"pools": [{"id": "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}],
+			"id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7"
+		}
+	]
+}
+`
+
+// SingleHealthmonitorBody is the canned body of a Get request on an existing healthmonitor.
+const SingleHealthmonitorBody = `
+{
+	"healthmonitor": {
+		"admin_state_up":true,
+		"tenant_id":"83657cfcdfe44cd5920adaf26c48ceea",
+		"delay":5,
+		"name":"db",
+		"expected_codes":"200",
+		"max_retries":2,
+		"http_method":"GET",
+		"timeout":2,
+		"url_path":"/",
+		"type":"HTTP",
+		"pools": [{"id": "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}],
+		"id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7"
+	}
+}
+`
+
+// PostUpdateHealthmonitorBody is the canned response body of an Update request on an existing healthmonitor.
+const PostUpdateHealthmonitorBody = ` +{ + "healthmonitor": { + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "delay":3, + "name":"NewHealthmonitorName", + "expected_codes":"301", + "max_retries":10, + "http_method":"GET", + "timeout":20, + "url_path":"/another_check", + "type":"HTTP", + "pools": [{"id": "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}], + "id":"5d4b5228-33b0-4e60-b225-9b727c1a20e7" + } +} +` + +var ( + HealthmonitorWeb = monitors.Monitor{ + AdminStateUp: true, + Name: "web", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 10, + MaxRetries: 1, + Timeout: 1, + Type: "PING", + ID: "466c8345-28d8-4f84-a246-e04380b0461d", + Pools: []monitors.PoolID{{ID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d"}}, + } + HealthmonitorDb = monitors.Monitor{ + AdminStateUp: true, + Name: "db", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 5, + ExpectedCodes: "200", + MaxRetries: 2, + Timeout: 2, + URLPath: "/", + Type: "HTTP", + HTTPMethod: "GET", + ID: "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + Pools: []monitors.PoolID{{ID: "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}}, + } + HealthmonitorUpdated = monitors.Monitor{ + AdminStateUp: true, + Name: "NewHealthmonitorName", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + Delay: 3, + ExpectedCodes: "301", + MaxRetries: 10, + Timeout: 20, + URLPath: "/another_check", + Type: "HTTP", + HTTPMethod: "GET", + ID: "5d4b5228-33b0-4e60-b225-9b727c1a20e7", + Pools: []monitors.PoolID{{ID: "d459f7d8-c6ee-439d-8713-d3fc08aeed8d"}}, + } +) + +// HandleHealthmonitorListSuccessfully sets up the test server to respond to a healthmonitor List request. +func HandleHealthmonitorListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, HealthmonitorsListBody) + case "556c8345-28d8-4f84-a246-e04380b0461d": + fmt.Fprintf(w, `{ "healthmonitors": [] }`) + default: + t.Fatalf("/v2.0/lbaas/healthmonitors invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleHealthmonitorCreationSuccessfully sets up the test server to respond to a healthmonitor creation request +// with a given response. +func HandleHealthmonitorCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "healthmonitor": { + "type":"HTTP", + "pool_id":"84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + "tenant_id":"453105b9-1754-413f-aab1-55f1af620750", + "delay":20, + "name":"db", + "timeout":10, + "max_retries":5, + "url_path":"/check", + "expected_codes":"200-299" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleHealthmonitorGetSuccessfully sets up the test server to respond to a healthmonitor Get request. 
+func HandleHealthmonitorGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors/5d4b5228-33b0-4e60-b225-9b727c1a20e7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleHealthmonitorBody) + }) +} + +// HandleHealthmonitorDeletionSuccessfully sets up the test server to respond to a healthmonitor deletion request. +func HandleHealthmonitorDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors/5d4b5228-33b0-4e60-b225-9b727c1a20e7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleHealthmonitorUpdateSuccessfully sets up the test server to respond to a healthmonitor Update request. +func HandleHealthmonitorUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/healthmonitors/5d4b5228-33b0-4e60-b225-9b727c1a20e7", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "healthmonitor": { + "name": "NewHealthmonitorName", + "delay": 3, + "timeout": 20, + "max_retries": 10, + "url_path": "/another_check", + "expected_codes": "301" + } + }`) + + fmt.Fprintf(w, PostUpdateHealthmonitorBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/requests_test.go new file mode 100644 index 000000000000..743d9c1c6bfc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/testing/requests_test.go @@ -0,0 +1,154 @@ +package testing + +import ( + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListHealthmonitors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorListSuccessfully(t) + + pages := 0 + err := monitors.List(fake.ServiceClient(), monitors.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := monitors.ExtractMonitors(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 healthmonitors, got %d", len(actual)) + } + th.CheckDeepEquals(t, HealthmonitorWeb, actual[0]) + th.CheckDeepEquals(t, HealthmonitorDb, actual[1]) + + return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllHealthmonitors(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorListSuccessfully(t) + + allPages, err := monitors.List(fake.ServiceClient(), monitors.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := monitors.ExtractMonitors(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, HealthmonitorWeb, actual[0]) + th.CheckDeepEquals(t, HealthmonitorDb, actual[1]) +} + +func TestCreateHealthmonitor(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorCreationSuccessfully(t, SingleHealthmonitorBody) + + actual, err := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{ + Type: "HTTP", + Name: "db", + PoolID: "84f1b61f-58c4-45bf-a8a9-2dafb9e5214d", + TenantID: "453105b9-1754-413f-aab1-55f1af620750", + Delay: 20, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, HealthmonitorDb, *actual) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = monitors.Create(fake.ServiceClient(), monitors.CreateOpts{Type: monitors.TypeHTTP}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGetHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := monitors.Get(client, "5d4b5228-33b0-4e60-b225-9b727c1a20e7").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, HealthmonitorDb, *actual) +} + +func TestDeleteHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorDeletionSuccessfully(t) + + res := monitors.Delete(fake.ServiceClient(), "5d4b5228-33b0-4e60-b225-9b727c1a20e7") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateHealthmonitor(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleHealthmonitorUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := monitors.Update(client, "5d4b5228-33b0-4e60-b225-9b727c1a20e7", monitors.UpdateOpts{ + Name: "NewHealthmonitorName", + Delay: 3, + Timeout: 20, + MaxRetries: 10, + URLPath: "/another_check", + ExpectedCodes: "301", + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, HealthmonitorUpdated, *actual) +} + +func TestDelayMustBeGreaterOrEqualThanTimeout(t *testing.T) { + _, err := monitors.Create(fake.ServiceClient(), monitors.CreateOpts{ + Type: "HTTP", + PoolID: "d459f7d8-c6ee-439d-8713-d3fc08aeed8d", + Delay: 1, + Timeout: 10, + MaxRetries: 5, + URLPath: "/check", + ExpectedCodes: "200-299", + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } + + _, err = monitors.Update(fake.ServiceClient(), "453105b9-1754-413f-aab1-55f1af620750", monitors.UpdateOpts{ + Delay: 1, + Timeout: 10, + }).Extract() + + if err == nil { + t.Fatalf("Expected error, got none") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go new file mode 100644 index 000000000000..a222e52a93dd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors/urls.go @@ -0,0 +1,16 @@ +package monitors + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "healthmonitors" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go
new file mode 100644
index 000000000000..2d57ed439385
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/doc.go
@@ -0,0 +1,124 @@
+/*
+Package pools provides information and interaction with Pools and
+Members of the LBaaS v2 extension for the OpenStack Networking service.
+
+Example to List Pools
+
+	listOpts := pools.ListOpts{
+		LoadbalancerID: "c79a4468-d788-410c-bf79-9a8ef6354852",
+	}
+
+	allPages, err := pools.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPools, err := pools.ExtractPools(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, pool := range allPools {
+		fmt.Printf("%+v\n", pool)
+	}
+
+Example to Create a Pool
+
+	createOpts := pools.CreateOpts{
+		LBMethod:       pools.LBMethodRoundRobin,
+		Protocol:       "HTTP",
+		Name:           "Example pool",
+		LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab",
+	}
+
+	pool, err := pools.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Pool
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	updateOpts := pools.UpdateOpts{
+		Name: "new-name",
+	}
+
+	pool, err := pools.Update(networkClient, poolID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Pool
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	err := pools.Delete(networkClient, poolID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List Pool Members
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	listOpts := pools.ListMembersOpts{
+		ProtocolPort: 80,
+	}
+
+	allPages, err := pools.ListMembers(networkClient, poolID, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allMembers, err := pools.ExtractMembers(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, member := range allMembers {
+		fmt.Printf("%+v\n", member)
+	}
+
+Example to Create a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+
+	createOpts := pools.CreateMemberOpts{
+		Name:         "db",
+		SubnetID:     "1981f108-3c48-48d2-b908-30f7d28532c9",
+		Address:      "10.0.2.11",
+		ProtocolPort: 80,
+		Weight:       10,
+	}
+
+	member, err := pools.CreateMember(networkClient, poolID, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	memberID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+
+	updateOpts := pools.UpdateMemberOpts{
+		Name:   "new-name",
+		Weight: 4,
+	}
+
+	member, err := pools.UpdateMember(networkClient, poolID, memberID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Member
+
+	poolID := "d67d56a6-4a86-4688-a282-f46444705c64"
+	memberID := "64dba99f-8af8-4200-8882-e32a0660f23e"
+
+	err := pools.DeleteMember(networkClient, poolID, memberID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+*/
+package pools
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go
new file mode 100644
index 000000000000..11564be83fee
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/requests.go
@@ -0,0 +1,356 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListOptsBuilder allows extensions to add additional
parameters to the +// List request. +type ListOptsBuilder interface { + ToPoolListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Pool attributes you want to see returned. SortKey allows you to +// sort by a particular Pool attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + LBMethod string `q:"lb_algorithm"` + Protocol string `q:"protocol"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + AdminStateUp *bool `q:"admin_state_up"` + Name string `q:"name"` + ID string `q:"id"` + LoadbalancerID string `q:"loadbalancer_id"` + ListenerID string `q:"listener_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPoolListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPoolListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// pools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those pools that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPoolListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type LBMethod string +type Protocol string + +// Supported attributes for create/update operations. +const ( + LBMethodRoundRobin LBMethod = "ROUND_ROBIN" + LBMethodLeastConnections LBMethod = "LEAST_CONNECTIONS" + LBMethodSourceIp LBMethod = "SOURCE_IP" + + ProtocolTCP Protocol = "TCP" + ProtocolHTTP Protocol = "HTTP" + ProtocolHTTPS Protocol = "HTTPS" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // The algorithm used to distribute load between the members of the pool. The + // current specification supports LBMethodRoundRobin, LBMethodLeastConnections + // and LBMethodSourceIp as valid values for this attribute. + LBMethod LBMethod `json:"lb_algorithm" required:"true"` + + // The protocol used by the pool members, you can use either + // ProtocolTCP, ProtocolHTTP, or ProtocolHTTPS. + Protocol Protocol `json:"protocol" required:"true"` + + // The Loadbalancer on which the members of the pool will be associated with. + // Note: one of LoadbalancerID or ListenerID must be provided. + LoadbalancerID string `json:"loadbalancer_id,omitempty" xor:"ListenerID"` + + // The Listener on which the members of the pool will be associated with. + // Note: one of LoadbalancerID or ListenerID must be provided. + ListenerID string `json:"listener_id,omitempty" xor:"LoadbalancerID"` + + // TenantID is the UUID of the project who owns the Pool. 
+ // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Pool. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // Name of the pool. + Name string `json:"name,omitempty"` + + // Human-readable description for the pool. + Description string `json:"description,omitempty"` + + // Persistence is the session persistence of the pool. + // Omit this field to prevent session persistence. + Persistence *SessionPersistence `json:"session_persistence,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToPoolCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToPoolCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// load balancer pool. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPoolCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular pool based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPoolUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts is the common options struct used in this package's Update +// operation. +type UpdateOpts struct { + // Name of the pool. + Name string `json:"name,omitempty"` + + // Human-readable description for the pool. + Description string `json:"description,omitempty"` + + // The algorithm used to distribute load between the members of the pool. The + // current specification supports LBMethodRoundRobin, LBMethodLeastConnections + // and LBMethodSourceIp as valid values for this attribute. + LBMethod LBMethod `json:"lb_algorithm,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToPoolUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToPoolUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "pool") +} + +// Update allows pools to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPoolUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will permanently delete a particular pool based on its unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// ListMemberOptsBuilder allows extensions to add additional parameters to the +// ListMembers request. +type ListMembersOptsBuilder interface { + ToMembersListQuery() (string, error) +} + +// ListMembersOpts allows the filtering and sorting of paginated collections +// through the API. 
Filtering is achieved by passing in struct field values +// that map to the Member attributes you want to see returned. SortKey allows +// you to sort by a particular Member attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListMembersOpts struct { + Name string `q:"name"` + Weight int `q:"weight"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + Address string `q:"address"` + ProtocolPort int `q:"protocol_port"` + ID string `q:"id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToMemberListQuery formats a ListOpts into a query string. +func (opts ListMembersOpts) ToMembersListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListMembers returns a Pager which allows you to iterate over a collection of +// members. It accepts a ListMembersOptsBuilder, which allows you to filter and +// sort the returned collection for greater efficiency. +// +// Default policy settings return only those members that are owned by the +// tenant who submits the request, unless an admin user submits the request. +func ListMembers(c *gophercloud.ServiceClient, poolID string, opts ListMembersOptsBuilder) pagination.Pager { + url := memberRootURL(c, poolID) + if opts != nil { + query, err := opts.ToMembersListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return MemberPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateMemberOptsBuilder allows extensions to add additional parameters to the +// CreateMember request. +type CreateMemberOptsBuilder interface { + ToMemberCreateMap() (map[string]interface{}, error) +} + +// CreateMemberOpts is the common options struct used in this package's CreateMember +// operation. +type CreateMemberOpts struct { + // The IP address of the member to receive traffic from the load balancer. + Address string `json:"address" required:"true"` + + // The port on which to listen for client traffic. + ProtocolPort int `json:"protocol_port" required:"true"` + + // Name of the Member. + Name string `json:"name,omitempty"` + + // TenantID is the UUID of the project who owns the Member. + // Only administrative users can specify a project UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Member. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // A positive integer value that indicates the relative portion of traffic + // that this member should receive from the pool. For example, a member with + // a weight of 10 receives five times as much traffic as a member with a + // weight of 2. + Weight int `json:"weight,omitempty"` + + // If you omit this parameter, LBaaS uses the vip_subnet_id parameter value + // for the subnet UUID. + SubnetID string `json:"subnet_id,omitempty"` + + // The administrative state of the Pool. A valid value is true (UP) + // or false (DOWN). + AdminStateUp *bool `json:"admin_state_up,omitempty"` +} + +// ToMemberCreateMap builds a request body from CreateMemberOpts. 
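For orientation, here is a minimal, hedged sketch of adding a backend to an existing pool with CreateMemberOpts; `networkClient`, the pool ID, and the subnet ID are hypothetical placeholders, and the `fmt` and pools imports are assumed.

	// Sketch only: attach a backend server to an existing pool.
	poolID := "72741b06-df4d-4715-b142-276b6bce75ab" // hypothetical pool ID
	createOpts := pools.CreateMemberOpts{
		Name:         "web-backend-1",
		Address:      "10.0.2.10",
		ProtocolPort: 80,
		Weight:       10,
		SubnetID:     "1981f108-3c48-48d2-b908-30f7d28532c9", // hypothetical subnet
	}
	member, err := pools.CreateMember(networkClient, poolID, createOpts).Extract()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", member)
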
+func (opts CreateMemberOpts) ToMemberCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// CreateMember will create and associate a Member with a particular Pool.
+func CreateMember(c *gophercloud.ServiceClient, poolID string, opts CreateMemberOpts) (r CreateMemberResult) {
+	b, err := opts.ToMemberCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(memberRootURL(c, poolID), b, &r.Body, nil)
+	return
+}
+
+// GetMember retrieves a particular Pool Member based on its unique ID.
+func GetMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r GetMemberResult) {
+	_, r.Err = c.Get(memberResourceURL(c, poolID, memberID), &r.Body, nil)
+	return
+}
+
+// UpdateMemberOptsBuilder allows extensions to add additional parameters to the
+// UpdateMember request.
+type UpdateMemberOptsBuilder interface {
+	ToMemberUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateMemberOpts is the common options struct used in this package's
+// UpdateMember operation.
+type UpdateMemberOpts struct {
+	// Name of the Member.
+	Name string `json:"name,omitempty"`
+
+	// A positive integer value that indicates the relative portion of traffic
+	// that this member should receive from the pool. For example, a member with
+	// a weight of 10 receives five times as much traffic as a member with a
+	// weight of 2.
+	Weight int `json:"weight,omitempty"`
+
+	// The administrative state of the Pool. A valid value is true (UP)
+	// or false (DOWN).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToMemberUpdateMap builds a request body from UpdateMemberOpts.
+func (opts UpdateMemberOpts) ToMemberUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "member")
+}
+
+// UpdateMember allows a Member to be updated.
+func UpdateMember(c *gophercloud.ServiceClient, poolID string, memberID string, opts UpdateMemberOptsBuilder) (r UpdateMemberResult) {
+	b, err := opts.ToMemberUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(memberResourceURL(c, poolID, memberID), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200, 201, 202},
+	})
+	return
+}
+
+// DeleteMember will remove and disassociate a Member from a particular
+// Pool.
+func DeleteMember(c *gophercloud.ServiceClient, poolID string, memberID string) (r DeleteMemberResult) {
+	_, r.Err = c.Delete(memberResourceURL(c, poolID, memberID), nil)
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go
new file mode 100644
index 000000000000..01efc768c225
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/results.go
@@ -0,0 +1,281 @@
+package pools
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/monitors"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// SessionPersistence represents the session persistence feature of the load
+// balancing service. It attempts to force connections or requests in the same
+// session to be processed by the same member as long as it is active. Three
+// types of persistence are supported:
+//
+// SOURCE_IP: With this mode, all connections originating from the same source
+// IP address will be handled by the same Member of the Pool.
+// HTTP_COOKIE: With this persistence mode, the load balancing function will +// create a cookie on the first request from a client. Subsequent +// requests containing the same cookie value will be handled by +// the same Member of the Pool. +// APP_COOKIE: With this persistence mode, the load balancing function will +// rely on a cookie established by the backend application. All +// requests carrying the same cookie value will be handled by the +// same Member of the Pool. +type SessionPersistence struct { + // The type of persistence mode. + Type string `json:"type"` + + // Name of cookie if persistence mode is set appropriately. + CookieName string `json:"cookie_name,omitempty"` +} + +// LoadBalancerID represents a load balancer. +type LoadBalancerID struct { + ID string `json:"id"` +} + +// ListenerID represents a listener. +type ListenerID struct { + ID string `json:"id"` +} + +// Pool represents a logical set of devices, such as web servers, that you +// group together to receive and process traffic. The load balancing function +// chooses a Member of the Pool according to the configured load balancing +// method to handle the new requests or connections received on the VIP address. +type Pool struct { + // The load-balancer algorithm, which is round-robin, least-connections, and + // so on. This value, which must be supported, is dependent on the provider. + // Round-robin must be supported. + LBMethod string `json:"lb_algorithm"` + + // The protocol of the Pool, which is TCP, HTTP, or HTTPS. + Protocol string `json:"protocol"` + + // Description for the Pool. + Description string `json:"description"` + + // A list of listeners objects IDs. + Listeners []ListenerID `json:"listeners"` //[]map[string]interface{} + + // A list of member objects IDs. + Members []Member `json:"members"` + + // The ID of associated health monitor. + MonitorID string `json:"healthmonitor_id"` + + // The network on which the members of the Pool will be located. Only members + // that are on this network can be added to the Pool. + SubnetID string `json:"subnet_id"` + + // Owner of the Pool. + TenantID string `json:"tenant_id"` + + // The administrative state of the Pool, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Pool name. Does not have to be unique. + Name string `json:"name"` + + // The unique ID for the Pool. + ID string `json:"id"` + + // A list of load balancer objects IDs. + Loadbalancers []LoadBalancerID `json:"loadbalancers"` + + // Indicates whether connections in the same session will be processed by the + // same Pool member or not. + Persistence SessionPersistence `json:"session_persistence"` + + // The load balancer provider. + Provider string `json:"provider"` + + // The Monitor associated with this Pool. + Monitor monitors.Monitor `json:"healthmonitor"` + + // The provisioning status of the pool. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// PoolPage is the page returned by a pager when traversing over a +// collection of pools. +type PoolPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of pools has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. 
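As a hedged illustration of the persistence modes described above, the sketch below creates an HTTP pool pinned by client source IP; the listener ID and `networkClient` are assumed placeholders.

	// Sketch only: create an HTTP pool with SOURCE_IP session persistence.
	createOpts := pools.CreateOpts{
		Name:        "web-pool",
		LBMethod:    pools.LBMethodRoundRobin,
		Protocol:    pools.ProtocolHTTP,
		ListenerID:  "2a280670-c202-4b0b-a562-34077415aabf", // hypothetical listener
		Persistence: &pools.SessionPersistence{Type: "SOURCE_IP"},
	}
	pool, err := pools.Create(networkClient, createOpts).Extract()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", pool)
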
+func (r PoolPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"pools_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PoolPage struct is empty. +func (r PoolPage) IsEmpty() (bool, error) { + is, err := ExtractPools(r) + return len(is) == 0, err +} + +// ExtractPools accepts a Page struct, specifically a PoolPage struct, +// and extracts the elements into a slice of Pool structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPools(r pagination.Page) ([]Pool, error) { + var s struct { + Pools []Pool `json:"pools"` + } + err := (r.(PoolPage)).ExtractInto(&s) + return s.Pools, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a pool. +func (r commonResult) Extract() (*Pool, error) { + var s struct { + Pool *Pool `json:"pool"` + } + err := r.ExtractInto(&s) + return s.Pool, err +} + +// CreateResult represents the result of a Create operation. Call its Extract +// method to interpret the result as a Pool. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret the result as a Pool. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an Update operation. Call its Extract +// method to interpret the result as a Pool. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a Delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Member represents the application running on a backend server. +type Member struct { + // Name of the Member. + Name string `json:"name"` + + // Weight of Member. + Weight int `json:"weight"` + + // The administrative state of the member, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Owner of the Member. + TenantID string `json:"tenant_id"` + + // Parameter value for the subnet UUID. + SubnetID string `json:"subnet_id"` + + // The Pool to which the Member belongs. + PoolID string `json:"pool_id"` + + // The IP address of the Member. + Address string `json:"address"` + + // The port on which the application is hosted. + ProtocolPort int `json:"protocol_port"` + + // The unique ID for the Member. + ID string `json:"id"` + + // The provisioning status of the member. + // This value is ACTIVE, PENDING_* or ERROR. + ProvisioningStatus string `json:"provisioning_status"` +} + +// MemberPage is the page returned by a pager when traversing over a +// collection of Members in a Pool. +type MemberPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of members has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r MemberPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"members_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a MemberPage struct is empty. 
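A hedged sketch of consuming the pager returned by List with EachPage and ExtractPools; `networkClient`, the `fmt` import, and the pagination import are assumed.

	// Sketch only: walk every page of pools matching a protocol filter.
	err := pools.List(networkClient, pools.ListOpts{Protocol: "HTTP"}).EachPage(func(page pagination.Page) (bool, error) {
		poolList, err := pools.ExtractPools(page)
		if err != nil {
			return false, err
		}
		for _, p := range poolList {
			fmt.Printf("%s uses %s\n", p.Name, p.LBMethod)
		}
		return true, nil
	})
	if err != nil {
		panic(err)
	}
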
+func (r MemberPage) IsEmpty() (bool, error) { + is, err := ExtractMembers(r) + return len(is) == 0, err +} + +// ExtractMembers accepts a Page struct, specifically a MemberPage struct, +// and extracts the elements into a slice of Members structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractMembers(r pagination.Page) ([]Member, error) { + var s struct { + Members []Member `json:"members"` + } + err := (r.(MemberPage)).ExtractInto(&s) + return s.Members, err +} + +type commonMemberResult struct { + gophercloud.Result +} + +// ExtractMember is a function that accepts a result and extracts a member. +func (r commonMemberResult) Extract() (*Member, error) { + var s struct { + Member *Member `json:"member"` + } + err := r.ExtractInto(&s) + return s.Member, err +} + +// CreateMemberResult represents the result of a CreateMember operation. +// Call its Extract method to interpret it as a Member. +type CreateMemberResult struct { + commonMemberResult +} + +// GetMemberResult represents the result of a GetMember operation. +// Call its Extract method to interpret it as a Member. +type GetMemberResult struct { + commonMemberResult +} + +// UpdateMemberResult represents the result of an UpdateMember operation. +// Call its Extract method to interpret it as a Member. +type UpdateMemberResult struct { + commonMemberResult +} + +// DeleteMemberResult represents the result of a DeleteMember operation. +// Call its ExtractErr method to determine if the request succeeded or failed. +type DeleteMemberResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/doc.go new file mode 100644 index 000000000000..46e335f3f2d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/doc.go @@ -0,0 +1,2 @@ +// pools unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/fixtures.go new file mode 100644 index 000000000000..df9d1fd05c37 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/fixtures.go @@ -0,0 +1,388 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// PoolsListBody contains the canned body of a pool list response. 
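A hedged sketch of renaming a pool and switching its algorithm via UpdateOpts, with the result interpreted through the UpdateResult type defined above; `networkClient` and the pool ID are hypothetical placeholders.

	// Sketch only: update an existing pool's name and load-balancing method.
	poolID := "c3741b06-df4d-4715-b142-276b6bce75ab" // hypothetical pool ID
	updateOpts := pools.UpdateOpts{
		Name:     "db-pool",
		LBMethod: pools.LBMethodLeastConnections,
	}
	pool, err := pools.Update(networkClient, poolID, updateOpts).Extract()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", pool)
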
+const PoolsListBody = ` +{ + "pools":[ + { + "lb_algorithm":"ROUND_ROBIN", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "466c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "53306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"72741b06-df4d-4715-b142-276b6bce75ab", + "name":"web", + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + }, + { + "lb_algorithm":"LEAST_CONNECTION", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "5f6c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "67306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"c3741b06-df4d-4715-b142-276b6bce75ab", + "name":"db", + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + } + ] +} +` + +// SinglePoolBody is the canned body of a Get request on an existing pool. +const SinglePoolBody = ` +{ + "pool": { + "lb_algorithm":"LEAST_CONNECTION", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "5f6c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "67306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"c3741b06-df4d-4715-b142-276b6bce75ab", + "name":"db", + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + } +} +` + +// PostUpdatePoolBody is the canned response body of a Update request on an existing pool. +const PostUpdatePoolBody = ` +{ + "pool": { + "lb_algorithm":"LEAST_CONNECTION", + "protocol":"HTTP", + "description":"", + "healthmonitor_id": "5f6c8345-28d8-4f84-a246-e04380b0461d", + "members":[{"id": "67306cda-815d-4354-9fe4-59e09da9c3c5"}], + "listeners":[{"id": "2a280670-c202-4b0b-a562-34077415aabf"}], + "loadbalancers":[{"id": "79e05663-7f03-45d2-a092-8b94062f22ab"}], + "id":"c3741b06-df4d-4715-b142-276b6bce75ab", + "name":"db", + "admin_state_up":true, + "tenant_id":"83657cfcdfe44cd5920adaf26c48ceea", + "provider": "haproxy" + } +} +` + +var ( + PoolWeb = pools.Pool{ + LBMethod: "ROUND_ROBIN", + Protocol: "HTTP", + Description: "", + MonitorID: "466c8345-28d8-4f84-a246-e04380b0461d", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "web", + Members: []pools.Member{{ID: "53306cda-815d-4354-9fe4-59e09da9c3c5"}}, + ID: "72741b06-df4d-4715-b142-276b6bce75ab", + Loadbalancers: []pools.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Listeners: []pools.ListenerID{{ID: "2a280670-c202-4b0b-a562-34077415aabf"}}, + Provider: "haproxy", + } + PoolDb = pools.Pool{ + LBMethod: "LEAST_CONNECTION", + Protocol: "HTTP", + Description: "", + MonitorID: "5f6c8345-28d8-4f84-a246-e04380b0461d", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "db", + Members: []pools.Member{{ID: "67306cda-815d-4354-9fe4-59e09da9c3c5"}}, + ID: "c3741b06-df4d-4715-b142-276b6bce75ab", + Loadbalancers: []pools.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Listeners: []pools.ListenerID{{ID: "2a280670-c202-4b0b-a562-34077415aabf"}}, + Provider: "haproxy", + } + PoolUpdated = pools.Pool{ + LBMethod: "LEAST_CONNECTION", + Protocol: "HTTP", + Description: "", + MonitorID: 
"5f6c8345-28d8-4f84-a246-e04380b0461d", + TenantID: "83657cfcdfe44cd5920adaf26c48ceea", + AdminStateUp: true, + Name: "db", + Members: []pools.Member{{ID: "67306cda-815d-4354-9fe4-59e09da9c3c5"}}, + ID: "c3741b06-df4d-4715-b142-276b6bce75ab", + Loadbalancers: []pools.LoadBalancerID{{ID: "79e05663-7f03-45d2-a092-8b94062f22ab"}}, + Listeners: []pools.ListenerID{{ID: "2a280670-c202-4b0b-a562-34077415aabf"}}, + Provider: "haproxy", + } +) + +// HandlePoolListSuccessfully sets up the test server to respond to a pool List request. +func HandlePoolListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, PoolsListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "pools": [] }`) + default: + t.Fatalf("/v2.0/lbaas/pools invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandlePoolCreationSuccessfully sets up the test server to respond to a pool creation request +// with a given response. +func HandlePoolCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/pools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "pool": { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "name": "Example pool", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "loadbalancer_id": "79e05663-7f03-45d2-a092-8b94062f22ab" + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandlePoolGetSuccessfully sets up the test server to respond to a pool Get request. +func HandlePoolGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/c3741b06-df4d-4715-b142-276b6bce75ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SinglePoolBody) + }) +} + +// HandlePoolDeletionSuccessfully sets up the test server to respond to a pool deletion request. +func HandlePoolDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/c3741b06-df4d-4715-b142-276b6bce75ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandlePoolUpdateSuccessfully sets up the test server to respond to a pool Update request. +func HandlePoolUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/c3741b06-df4d-4715-b142-276b6bce75ab", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "pool": { + "name": "NewPoolName", + "lb_algorithm": "LEAST_CONNECTIONS" + } + }`) + + fmt.Fprintf(w, PostUpdatePoolBody) + }) +} + +// MembersListBody contains the canned body of a member list response. 
+const MembersListBody = ` +{ + "members":[ + { + "id": "2a280670-c202-4b0b-a562-34077415aabf", + "address": "10.0.2.10", + "weight": 5, + "name": "web", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":true, + "protocol_port": 80 + }, + { + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":false, + "protocol_port": 80 + } + ] +} +` + +// SingleMemberBody is the canned body of a Get request on an existing member. +const SingleMemberBody = ` +{ + "member": { + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":false, + "protocol_port": 80 + } +} +` + +// PostUpdateMemberBody is the canned response body of a Update request on an existing member. +const PostUpdateMemberBody = ` +{ + "member": { + "id": "fad389a3-9a4a-4762-a365-8c7038508b5d", + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "admin_state_up":false, + "protocol_port": 80 + } +} +` + +var ( + MemberWeb = pools.Member{ + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + AdminStateUp: true, + Name: "web", + ID: "2a280670-c202-4b0b-a562-34077415aabf", + Address: "10.0.2.10", + Weight: 5, + ProtocolPort: 80, + } + MemberDb = pools.Member{ + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + AdminStateUp: false, + Name: "db", + ID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + Address: "10.0.2.11", + Weight: 10, + ProtocolPort: 80, + } + MemberUpdated = pools.Member{ + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + AdminStateUp: false, + Name: "db", + ID: "fad389a3-9a4a-4762-a365-8c7038508b5d", + Address: "10.0.2.11", + Weight: 10, + ProtocolPort: 80, + } +) + +// HandleMemberListSuccessfully sets up the test server to respond to a member List request. +func HandleMemberListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, MembersListBody) + case "45e08a3e-a78f-4b40-a229-1e7e23eee1ab": + fmt.Fprintf(w, `{ "members": [] }`) + default: + t.Fatalf("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members invoked with unexpected marker=[%s]", marker) + } + }) +} + +// HandleMemberCreationSuccessfully sets up the test server to respond to a member creation request +// with a given response. 
+func HandleMemberCreationSuccessfully(t *testing.T, response string) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestJSONRequest(t, r, `{ + "member": { + "address": "10.0.2.11", + "weight": 10, + "name": "db", + "subnet_id": "1981f108-3c48-48d2-b908-30f7d28532c9", + "tenant_id": "2ffc6e22aae24e4795f87155d24c896f", + "protocol_port": 80 + } + }`) + + w.WriteHeader(http.StatusAccepted) + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, response) + }) +} + +// HandleMemberGetSuccessfully sets up the test server to respond to a member Get request. +func HandleMemberGetSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members/2a280670-c202-4b0b-a562-34077415aabf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + fmt.Fprintf(w, SingleMemberBody) + }) +} + +// HandleMemberDeletionSuccessfully sets up the test server to respond to a member deletion request. +func HandleMemberDeletionSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members/2a280670-c202-4b0b-a562-34077415aabf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleMemberUpdateSuccessfully sets up the test server to respond to a member Update request. +func HandleMemberUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/lbaas/pools/332abe93-f488-41ba-870b-2ac66be7f853/members/2a280670-c202-4b0b-a562-34077415aabf", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestJSONRequest(t, r, `{ + "member": { + "name": "newMemberName", + "weight": 4 + } + }`) + + fmt.Fprintf(w, PostUpdateMemberBody) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/requests_test.go new file mode 100644 index 000000000000..4af00ecc2576 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/testing/requests_test.go @@ -0,0 +1,262 @@ +package testing + +import ( + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestListPools(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolListSuccessfully(t) + + pages := 0 + err := pools.List(fake.ServiceClient(), pools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := pools.ExtractPools(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 pools, got %d", len(actual)) + } + th.CheckDeepEquals(t, PoolWeb, actual[0]) + th.CheckDeepEquals(t, PoolDb, actual[1]) + + 
return true, nil + }) + + th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllPools(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolListSuccessfully(t) + + allPages, err := pools.List(fake.ServiceClient(), pools.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := pools.ExtractPools(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, PoolWeb, actual[0]) + th.CheckDeepEquals(t, PoolDb, actual[1]) +} + +func TestCreatePool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolCreationSuccessfully(t, SinglePoolBody) + + actual, err := pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: "HTTP", + Name: "Example pool", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + LoadbalancerID: "79e05663-7f03-45d2-a092-8b94062f22ab", + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, PoolDb, *actual) +} + +func TestGetPool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.Get(client, "c3741b06-df4d-4715-b142-276b6bce75ab").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, PoolDb, *actual) +} + +func TestDeletePool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolDeletionSuccessfully(t) + + res := pools.Delete(fake.ServiceClient(), "c3741b06-df4d-4715-b142-276b6bce75ab") + th.AssertNoErr(t, res.Err) +} + +func TestUpdatePool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePoolUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.Update(client, "c3741b06-df4d-4715-b142-276b6bce75ab", pools.UpdateOpts{ + Name: "NewPoolName", + LBMethod: pools.LBMethodLeastConnections, + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, PoolUpdated, *actual) +} + +func TestRequiredPoolCreateOpts(t *testing.T) { + res := pools.Create(fake.ServiceClient(), pools.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethod("invalid"), + Protocol: pools.ProtocolHTTPS, + LoadbalancerID: "69055154-f603-4a28-8951-7cc2d9e54a9a", + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + + res = pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: pools.Protocol("invalid"), + LoadbalancerID: "69055154-f603-4a28-8951-7cc2d9e54a9a", + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + + res = pools.Create(fake.ServiceClient(), pools.CreateOpts{ + LBMethod: pools.LBMethodRoundRobin, + Protocol: pools.ProtocolHTTPS, + }) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } +} + +func TestListMembers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberListSuccessfully(t) + + pages := 0 + err := pools.ListMembers(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.ListMembersOpts{}).EachPage(func(page pagination.Page) (bool, error) { + pages++ + + actual, err := pools.ExtractMembers(page) + if err != nil { + return false, err + } + + if len(actual) != 2 { + t.Fatalf("Expected 2 members, got %d", len(actual)) + } + th.CheckDeepEquals(t, MemberWeb, actual[0]) + th.CheckDeepEquals(t, MemberDb, actual[1]) + + return true, nil + }) + + 
th.AssertNoErr(t, err) + + if pages != 1 { + t.Errorf("Expected 1 page, saw %d", pages) + } +} + +func TestListAllMembers(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberListSuccessfully(t) + + allPages, err := pools.ListMembers(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.ListMembersOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := pools.ExtractMembers(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, MemberWeb, actual[0]) + th.CheckDeepEquals(t, MemberDb, actual[1]) +} + +func TestCreateMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberCreationSuccessfully(t, SingleMemberBody) + + actual, err := pools.CreateMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.CreateMemberOpts{ + Name: "db", + SubnetID: "1981f108-3c48-48d2-b908-30f7d28532c9", + TenantID: "2ffc6e22aae24e4795f87155d24c896f", + Address: "10.0.2.11", + ProtocolPort: 80, + Weight: 10, + }).Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, MemberDb, *actual) +} + +func TestRequiredMemberCreateOpts(t *testing.T) { + res := pools.CreateMember(fake.ServiceClient(), "", pools.CreateMemberOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = pools.CreateMember(fake.ServiceClient(), "", pools.CreateMemberOpts{Address: "1.2.3.4", ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + res = pools.CreateMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.CreateMemberOpts{ProtocolPort: 80}) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } + res = pools.CreateMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", pools.CreateMemberOpts{Address: "1.2.3.4"}) + if res.Err == nil { + t.Fatalf("Expected error, but got none") + } +} + +func TestGetMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberGetSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.GetMember(client, "332abe93-f488-41ba-870b-2ac66be7f853", "2a280670-c202-4b0b-a562-34077415aabf").Extract() + if err != nil { + t.Fatalf("Unexpected Get error: %v", err) + } + + th.CheckDeepEquals(t, MemberDb, *actual) +} + +func TestDeleteMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberDeletionSuccessfully(t) + + res := pools.DeleteMember(fake.ServiceClient(), "332abe93-f488-41ba-870b-2ac66be7f853", "2a280670-c202-4b0b-a562-34077415aabf") + th.AssertNoErr(t, res.Err) +} + +func TestUpdateMember(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMemberUpdateSuccessfully(t) + + client := fake.ServiceClient() + actual, err := pools.UpdateMember(client, "332abe93-f488-41ba-870b-2ac66be7f853", "2a280670-c202-4b0b-a562-34077415aabf", pools.UpdateMemberOpts{ + Name: "newMemberName", + Weight: 4, + }).Extract() + if err != nil { + t.Fatalf("Unexpected Update error: %v", err) + } + + th.CheckDeepEquals(t, MemberUpdated, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go new file mode 100644 index 000000000000..bceca67707f2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/lbaas_v2/pools/urls.go @@ -0,0 +1,25 @@ +package pools + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "lbaas" + resourcePath = "pools" + memberPath = 
"members" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} + +func memberRootURL(c *gophercloud.ServiceClient, poolId string) string { + return c.ServiceURL(rootPath, resourcePath, poolId, memberPath) +} + +func memberResourceURL(c *gophercloud.ServiceClient, poolID string, memeberID string) string { + return c.ServiceURL(rootPath, resourcePath, poolID, memberPath, memeberID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/doc.go new file mode 100644 index 000000000000..910947369303 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/doc.go @@ -0,0 +1,30 @@ +/* +Package networkipavailabilities provides the ability to retrieve and manage +networkipavailabilities through the Neutron API. + +Example of Listing NetworkIPAvailabilities + + allPages, err := networkipavailabilities.List(networkClient, networkipavailabilities.ListOpts{}).AllPages() + if err != nil { + panic(err) + } + + allAvailabilities, err := subnetpools.ExtractSubnetPools(allPages) + if err != nil { + panic(err) + } + + for _, availability := range allAvailabilities { + fmt.Printf("%+v\n", availability) + } + +Example of Getting a single NetworkIPAvailability + + availability, err := networkipavailabilities.Get(networkClient, "cf11ab78-2302-49fa-870f-851a08c7afb8").Extract() + if err != nil { + panic(err) + } + + fmt.Printf("%+v\n", availability) +*/ +package networkipavailabilities diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/requests.go new file mode 100644 index 000000000000..c024d7a7b851 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/requests.go @@ -0,0 +1,61 @@ +package networkipavailabilities + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToNetworkIPAvailabilityListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the Neutron API. +type ListOpts struct { + // NetworkName allows to filter on the identifier of a network. + NetworkID string `q:"network_id"` + + // NetworkName allows to filter on the name of a network. + NetworkName string `q:"network_name"` + + // IPVersion allows to filter on the version of the IP protocol. + // You can use the well-known IP versions with the gophercloud.IPVersion type. + IPVersion string `q:"ip_version"` + + // ProjectID allows to filter on the Identity project field. + ProjectID string `q:"project_id"` + + // TenantID allows to filter on the Identity project field. + TenantID string `q:"tenant_id"` +} + +// ToNetworkIPAvailabilityListQuery formats a ListOpts into a query string. 
+func (opts ListOpts) ToNetworkIPAvailabilityListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// networkipavailabilities. It accepts a ListOpts struct, which allows you to +// filter the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToNetworkIPAvailabilityListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return NetworkIPAvailabilityPage{pagination.SinglePageBase(r)} + }) +} + +// Get retrieves a specific NetworkIPAvailability based on its ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/results.go new file mode 100644 index 000000000000..4bcf10f49d96 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/results.go @@ -0,0 +1,96 @@ +package networkipavailabilities + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// GetResult represents the result of a Get operation. Call its Extract +// method to interpret it as a NetworkIPAvailability. +type GetResult struct { + commonResult +} + +// Extract is a function that accepts a result and extracts a NetworkIPAvailability. +func (r commonResult) Extract() (*NetworkIPAvailability, error) { + var s struct { + NetworkIPAvailability *NetworkIPAvailability `json:"network_ip_availability"` + } + err := r.ExtractInto(&s) + return s.NetworkIPAvailability, err +} + +// NetworkIPAvailability represents availability details for a single network. +type NetworkIPAvailability struct { + // NetworkID contains an unique identifier of the network. + NetworkID string `json:"network_id"` + + // NetworkName represents human-readable name of the network. + NetworkName string `json:"network_name"` + + // ProjectID is the ID of the Identity project. + ProjectID string `json:"project_id"` + + // TenantID is the ID of the Identity project. + TenantID string `json:"tenant_id"` + + // SubnetIPAvailabilities contains availability details for every subnet + // that is associated to the network. + SubnetIPAvailabilities []SubnetIPAvailability `json:"subnet_ip_availability"` + + // TotalIPs represents a number of IP addresses in the network. + TotalIPs int `json:"total_ips"` + + // UsedIPs represents a number of used IP addresses in the network. + UsedIPs int `json:"used_ips"` +} + +// SubnetIPAvailability represents availability details for a single subnet. +type SubnetIPAvailability struct { + // SubnetID contains an unique identifier of the subnet. + SubnetID string `json:"subnet_id"` + + // SubnetName represents human-readable name of the subnet. + SubnetName string `json:"subnet_name"` + + // CIDR represents prefix in the CIDR format. + CIDR string `json:"cidr"` + + // IPVersion is the IP protocol version. 
+ IPVersion int `json:"ip_version"` + + // TotalIPs represents a number of IP addresses in the subnet. + TotalIPs int `json:"total_ips"` + + // UsedIPs represents a number of used IP addresses in the subnet. + UsedIPs int `json:"used_ips"` +} + +// NetworkIPAvailabilityPage stores a single page of NetworkIPAvailabilities +// from the List call. +type NetworkIPAvailabilityPage struct { + pagination.SinglePageBase +} + +// IsEmpty determines whether or not a NetworkIPAvailability is empty. +func (r NetworkIPAvailabilityPage) IsEmpty() (bool, error) { + networkipavailabilities, err := ExtractNetworkIPAvailabilities(r) + return len(networkipavailabilities) == 0, err +} + +// ExtractNetworkIPAvailabilities interprets the results of a single page from +// a List() API call, producing a slice of NetworkIPAvailabilities structures. +func ExtractNetworkIPAvailabilities(r pagination.Page) ([]NetworkIPAvailability, error) { + var s struct { + NetworkIPAvailabilities []NetworkIPAvailability `json:"network_ip_availabilities"` + } + err := (r.(NetworkIPAvailabilityPage)).ExtractInto(&s) + if err != nil { + return nil, err + } + return s.NetworkIPAvailabilities, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/doc.go new file mode 100644 index 000000000000..baf115fc0ded --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/doc.go @@ -0,0 +1,2 @@ +// networkipavailabilities unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/fixtures.go new file mode 100644 index 000000000000..2cd993d6bd7d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/fixtures.go @@ -0,0 +1,130 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities" +) + +// NetworkIPAvailabilityListResult represents raw server response from a server to a list call. 
+const NetworkIPAvailabilityListResult = ` +{ + "network_ip_availabilities": [ + { + "network_id": "080ee064-036d-405a-a307-3bde4a213a1b", + "network_name": "private", + "project_id": "fb57277ef2f84a0e85b9018ec2dedbf7", + "subnet_ip_availability": [ + { + "cidr": "10.0.0.64/26", + "ip_version": 4, + "subnet_id": "497ac4d3-0b92-42cf-82de-71302ab2b656", + "subnet_name": "second-private-subnet", + "total_ips": 61, + "used_ips": 12 + }, + { + "cidr": "10.0.0.0/26", + "ip_version": 4, + "subnet_id": "521f47e7-c4fb-452c-b71a-851da38cc571", + "subnet_name": "private-subnet", + "total_ips": 61, + "used_ips": 2 + } + ], + "tenant_id": "fb57277ef2f84a0e85b9018ec2dedbf7", + "total_ips": 122, + "used_ips": 14 + }, + { + "network_id": "cf11ab78-2302-49fa-870f-851a08c7afb8", + "network_name": "public", + "project_id": "424e7cf0243c468ca61732ba45973b3e", + "subnet_ip_availability": [ + { + "cidr": "203.0.113.0/24", + "ip_version": 4, + "subnet_id": "4afe6e5f-9649-40db-b18f-64c7ead942bd", + "subnet_name": "public-subnet", + "total_ips": 253, + "used_ips": 3 + } + ], + "tenant_id": "424e7cf0243c468ca61732ba45973b3e", + "total_ips": 253, + "used_ips": 3 + } + ] +} +` + +// NetworkIPAvailability1 is an expected representation of a first object from the ResourceListResult. +var NetworkIPAvailability1 = networkipavailabilities.NetworkIPAvailability{ + NetworkID: "080ee064-036d-405a-a307-3bde4a213a1b", + NetworkName: "private", + ProjectID: "fb57277ef2f84a0e85b9018ec2dedbf7", + TenantID: "fb57277ef2f84a0e85b9018ec2dedbf7", + TotalIPs: 122, + UsedIPs: 14, + SubnetIPAvailabilities: []networkipavailabilities.SubnetIPAvailability{ + { + SubnetID: "497ac4d3-0b92-42cf-82de-71302ab2b656", + SubnetName: "second-private-subnet", + CIDR: "10.0.0.64/26", + IPVersion: int(gophercloud.IPv4), + TotalIPs: 61, + UsedIPs: 12, + }, + { + SubnetID: "521f47e7-c4fb-452c-b71a-851da38cc571", + SubnetName: "private-subnet", + CIDR: "10.0.0.0/26", + IPVersion: int(gophercloud.IPv4), + TotalIPs: 61, + UsedIPs: 2, + }, + }, +} + +// NetworkIPAvailability2 is an expected representation of a first object from the ResourceListResult. +var NetworkIPAvailability2 = networkipavailabilities.NetworkIPAvailability{ + NetworkID: "cf11ab78-2302-49fa-870f-851a08c7afb8", + NetworkName: "public", + ProjectID: "424e7cf0243c468ca61732ba45973b3e", + TenantID: "424e7cf0243c468ca61732ba45973b3e", + TotalIPs: 253, + UsedIPs: 3, + SubnetIPAvailabilities: []networkipavailabilities.SubnetIPAvailability{ + { + SubnetID: "4afe6e5f-9649-40db-b18f-64c7ead942bd", + SubnetName: "public-subnet", + CIDR: "203.0.113.0/24", + IPVersion: int(gophercloud.IPv4), + TotalIPs: 253, + UsedIPs: 3, + }, + }, +} + +// NetworkIPAvailabilityGetResult represents raw server response from a server to a get call. 
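A hedged sketch of turning these availability results into a free-address report; `networkClient` is an assumed networking service client and the `fmt` import is assumed.

	// Sketch only: list IPv4 availability and report free addresses per subnet.
	allPages, err := networkipavailabilities.List(networkClient, networkipavailabilities.ListOpts{IPVersion: "4"}).AllPages()
	if err != nil {
		panic(err)
	}
	availabilities, err := networkipavailabilities.ExtractNetworkIPAvailabilities(allPages)
	if err != nil {
		panic(err)
	}
	for _, av := range availabilities {
		for _, subnet := range av.SubnetIPAvailabilities {
			fmt.Printf("%s: %d free IPs\n", subnet.SubnetName, subnet.TotalIPs-subnet.UsedIPs)
		}
	}
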
+const NetworkIPAvailabilityGetResult = ` +{ + "network_ip_availability": { + "network_id": "cf11ab78-2302-49fa-870f-851a08c7afb8", + "network_name": "public", + "project_id": "424e7cf0243c468ca61732ba45973b3e", + "subnet_ip_availability": [ + { + "cidr": "203.0.113.0/24", + "ip_version": 4, + "subnet_id": "4afe6e5f-9649-40db-b18f-64c7ead942bd", + "subnet_name": "public-subnet", + "total_ips": 253, + "used_ips": 3 + } + ], + "tenant_id": "424e7cf0243c468ca61732ba45973b3e", + "total_ips": 253, + "used_ips": 3 + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/requests_test.go new file mode 100644 index 000000000000..820debb2fb64 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/testing/requests_test.go @@ -0,0 +1,87 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/network-ip-availabilities", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, NetworkIPAvailabilityListResult) + }) + + count := 0 + + networkipavailabilities.List(fake.ServiceClient(), networkipavailabilities.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := networkipavailabilities.ExtractNetworkIPAvailabilities(page) + if err != nil { + t.Errorf("Failed to extract network IP availabilities: %v", err) + return false, nil + } + + expected := []networkipavailabilities.NetworkIPAvailability{ + NetworkIPAvailability1, + NetworkIPAvailability2, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/network-ip-availabilities/cf11ab78-2302-49fa-870f-851a08c7afb8", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, NetworkIPAvailabilityGetResult) + }) + + s, err := networkipavailabilities.Get(fake.ServiceClient(), "cf11ab78-2302-49fa-870f-851a08c7afb8").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.NetworkID, "cf11ab78-2302-49fa-870f-851a08c7afb8") + th.AssertEquals(t, s.NetworkName, "public") + th.AssertEquals(t, s.ProjectID, "424e7cf0243c468ca61732ba45973b3e") + th.AssertEquals(t, s.TenantID, "424e7cf0243c468ca61732ba45973b3e") + th.AssertEquals(t, s.TotalIPs, 253) + th.AssertEquals(t, s.UsedIPs, 3) + th.AssertDeepEquals(t, s.SubnetIPAvailabilities, []networkipavailabilities.SubnetIPAvailability{ + { + SubnetID: "4afe6e5f-9649-40db-b18f-64c7ead942bd", + SubnetName: "public-subnet", + CIDR: "203.0.113.0/24", + IPVersion: 
int(gophercloud.IPv4), + TotalIPs: 253, + UsedIPs: 3, + }, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/urls.go new file mode 100644 index 000000000000..5e3228ff3a49 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/networkipavailabilities/urls.go @@ -0,0 +1,21 @@ +package networkipavailabilities + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "network-ip-availabilities" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, networkIPAvailabilityID string) string { + return c.ServiceURL(resourcePath, networkIPAvailabilityID) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, networkIPAvailabilityID string) string { + return resourceURL(c, networkIPAvailabilityID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/doc.go new file mode 100644 index 000000000000..0d2ed5897a34 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/doc.go @@ -0,0 +1,3 @@ +// Package portsbinding provides information and interaction with the port +// binding extension for the OpenStack Networking service. +package portsbinding diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/requests.go new file mode 100644 index 000000000000..3c287463aa5b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/requests.go @@ -0,0 +1,91 @@ +package portsbinding + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// CreateOptsExt adds port binding options to the base ports.CreateOpts. +type CreateOptsExt struct { + // CreateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Create operation in this package. + ports.CreateOptsBuilder + + // The ID of the host where the port is allocated + HostID string `json:"binding:host_id,omitempty"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"binding:vnic_type,omitempty"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]string `json:"binding:profile,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. 
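A hedged sketch of the decorator pattern this extension uses: wrap the base ports.CreateOpts in CreateOptsExt, then read the binding attributes back with an embedded struct, as the tests below also do for Get and List. The IDs, host name, and `networkClient` are hypothetical, and the ports and `fmt` imports are assumed.

	// Sketch only: create a port with binding attributes and read them back.
	iTrue := true
	createOpts := portsbinding.CreateOptsExt{
		CreateOptsBuilder: ports.CreateOpts{
			NetworkID:    "a87cc70a-3e15-4acf-8205-9b711a3531b7", // hypothetical network
			Name:         "private-port",
			AdminStateUp: &iTrue,
		},
		HostID:   "HOST1", // hypothetical host
		VNICType: "normal",
	}

	var p struct {
		ports.Port
		portsbinding.PortsBindingExt
	}
	err := ports.Create(networkClient, createOpts).ExtractInto(&p)
	if err != nil {
		panic(err)
	}
	fmt.Printf("port %s is bound to %s\n", p.ID, p.HostID)
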
+func (opts CreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.HostID != "" { + port["binding:host_id"] = opts.HostID + } + + if opts.VNICType != "" { + port["binding:vnic_type"] = opts.VNICType + } + + if opts.Profile != nil { + port["binding:profile"] = opts.Profile + } + + return base, nil +} + +// UpdateOptsExt adds port binding options to the base ports.UpdateOpts +type UpdateOptsExt struct { + // UpdateOptsBuilder is the interface options structs have to satisfy in order + // to be used in the main Update operation in this package. + ports.UpdateOptsBuilder + + // The ID of the host where the port is allocated. + HostID string `json:"binding:host_id,omitempty"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"binding:vnic_type,omitempty"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. + Profile map[string]string `json:"binding:profile,omitempty"` +} + +// ToPortUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.HostID != "" { + port["binding:host_id"] = opts.HostID + } + + if opts.VNICType != "" { + port["binding:vnic_type"] = opts.VNICType + } + + if opts.Profile != nil { + port["binding:profile"] = opts.Profile + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/results.go new file mode 100644 index 000000000000..c39f7df50fcf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/results.go @@ -0,0 +1,30 @@ +package portsbinding + +// IP is a sub-struct that represents an individual IP. +type IP struct { + SubnetID string `json:"subnet_id"` + IPAddress string `json:"ip_address"` +} + +// PortsBindingExt represents a decorated form of a Port with the additional +// port binding information. +type PortsBindingExt struct { + // The ID of the host where the port is allocated. + HostID string `json:"binding:host_id"` + + // A dictionary that enables the application to pass information about + // functions that the Networking API provides. + VIFDetails map[string]interface{} `json:"binding:vif_details"` + + // The VIF type for the port. + VIFType string `json:"binding:vif_type"` + + // The virtual network interface card (vNIC) type that is bound to the + // neutron port. + VNICType string `json:"binding:vnic_type"` + + // A dictionary that enables the application running on the specified + // host to pass and receive virtual network interface (VIF) port-specific + // information to the plug-in. 
+ Profile map[string]string `json:"binding:profile"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/doc.go new file mode 100644 index 000000000000..abdc76d8a292 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/doc.go @@ -0,0 +1,2 @@ +// portsbindings unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/fixtures.go new file mode 100644 index 000000000000..03fe353910c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/fixtures.go @@ -0,0 +1,150 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + porttest "github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func HandleListSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, porttest.ListResponse) + }) +} + +func HandleGet(t *testing.T) { + th.Mux.HandleFunc("/v2.0/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, porttest.GetResponse) + }) +} + +func HandleCreate(t *testing.T) { + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "security_groups": ["foo"], + "binding:host_id": "HOST1", + "binding:vnic_type": "normal" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "allowed_address_pairs": [], + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "binding:host_id": "HOST1", + "binding:vnic_type": "normal", + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} + `) + }) +} + +func HandleUpdate(t *testing.T) { + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + 
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "binding:host_id": "HOST1", + "binding:vnic_type": "normal" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "", + "binding:host_id": "HOST1", + "binding:vnic_type": "normal" + } +} + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/requests_test.go new file mode 100644 index 000000000000..57901a6b0b55 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding/testing/requests_test.go @@ -0,0 +1,183 @@ +package testing + +import ( + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsbinding" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleListSuccessfully(t) + + type PortWithExt struct { + ports.Port + portsbinding.PortsBindingExt + } + var actual []PortWithExt + + expected := []PortWithExt{ + { + Port: ports.Port{ + Status: "ACTIVE", + Name: "", + AdminStateUp: true, + NetworkID: "70c1db1f-b701-45bd-96e0-a313ee3430b3", + TenantID: "", + DeviceOwner: "network:router_gateway", + MACAddress: "fa:16:3e:58:42:ed", + FixedIPs: []ports.IP{ + { + SubnetID: "008ba151-0b8c-4a67-98b5-0d2b87666062", + IPAddress: "172.24.4.2", + }, + }, + ID: "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + SecurityGroups: []string{}, + DeviceID: "9ae135f4-b6e0-4dad-9e91-3c223e385824", + }, + PortsBindingExt: portsbinding.PortsBindingExt{ + VNICType: "normal", + HostID: "devstack", + }, + }, + } + + allPages, err := ports.List(fake.ServiceClient(), ports.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + err = ports.ExtractPortsInto(allPages, &actual) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleGet(t) + + var s struct { + ports.Port + portsbinding.PortsBindingExt + } + + err := ports.Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, "ACTIVE") + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.AdminStateUp, true) + th.AssertEquals(t, s.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, s.TenantID, 
"7e02058126cc4950b75f9970368ba177") + th.AssertEquals(t, s.DeviceOwner, "network:router_interface") + th.AssertEquals(t, s.MACAddress, "fa:16:3e:23:fd:d7") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.1"}, + }) + th.AssertEquals(t, s.ID, "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2") + th.AssertDeepEquals(t, s.SecurityGroups, []string{}) + th.AssertEquals(t, s.DeviceID, "5e3898d7-11be-483e-9732-b2f5eccd2b2e") + + th.AssertEquals(t, s.HostID, "devstack") + th.AssertEquals(t, s.VNICType, "normal") + th.AssertEquals(t, s.VIFType, "ovs") + th.AssertDeepEquals(t, s.VIFDetails, map[string]interface{}{"port_filter": true, "ovs_hybrid_plug": true}) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleCreate(t) + + var s struct { + ports.Port + portsbinding.PortsBindingExt + } + + asu := true + portCreateOpts := ports.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: &[]string{"foo"}, + } + + createOpts := portsbinding.CreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + HostID: "HOST1", + VNICType: "normal", + } + + err := ports.Create(fake.ServiceClient(), createOpts).ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, "DOWN") + th.AssertEquals(t, s.Name, "private-port") + th.AssertEquals(t, s.AdminStateUp, true) + th.AssertEquals(t, s.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, s.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, s.DeviceOwner, "") + th.AssertEquals(t, s.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, s.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) + th.AssertEquals(t, s.HostID, "HOST1") + th.AssertEquals(t, s.VNICType, "normal") +} + +func TestRequiredCreateOpts(t *testing.T) { + res := ports.Create(fake.ServiceClient(), portsbinding.CreateOptsExt{CreateOptsBuilder: ports.CreateOpts{}}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + HandleUpdate(t) + + var s struct { + ports.Port + portsbinding.PortsBindingExt + } + + portUpdateOpts := ports.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: &[]string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}, + } + + updateOpts := portsbinding.UpdateOptsExt{ + UpdateOptsBuilder: portUpdateOpts, + HostID: "HOST1", + VNICType: "normal", + } + + err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) + th.AssertEquals(t, s.HostID, "HOST1") + th.AssertEquals(t, s.VNICType, "normal") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/doc.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/doc.go new file mode 100644 index 000000000000..2b9a39168113 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/doc.go @@ -0,0 +1,145 @@ +/* +Package portsecurity provides information and interaction with the port +security extension for the OpenStack Networking service. + +Example to List Networks with Port Security Information + + type NetworkWithPortSecurityExt struct { + networks.Network + portsecurity.PortSecurityExt + } + + var allNetworks []NetworkWithPortSecurityExt + + listOpts := networks.ListOpts{ + Name: "network_1", + } + + allPages, err := networks.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Println("%+v\n", network) + } + +Example to Create a Network without Port Security + + var networkWithPortSecurityExt struct { + networks.Network + portsecurity.PortSecurityExt + } + + networkCreateOpts := networks.CreateOpts{ + Name: "private", + } + + iFalse := false + createOpts := portsecurity.NetworkCreateOptsExt{ + CreateOptsBuilder: networkCreateOpts, + PortSecurityEnabled: &iFalse, + } + + err := networks.Create(networkClient, createOpts).ExtractInto(&networkWithPortSecurityExt) + if err != nil { + panic(err) + } + + fmt.Println("%+v\n", networkWithPortSecurityExt) + +Example to Disable Port Security on an Existing Network + + var networkWithPortSecurityExt struct { + networks.Network + portsecurity.PortSecurityExt + } + + iFalse := false + networkID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + networkUpdateOpts := networks.UpdateOpts{} + updateOpts := portsecurity.NetworkUpdateOptsExt{ + UpdateOptsBuilder: networkUpdateOpts, + PortSecurityEnabled: &iFalse, + } + + err := networks.Update(networkClient, networkID, updateOpts).ExtractInto(&networkWithPortSecurityExt) + if err != nil { + panic(err) + } + + fmt.Println("%+v\n", networkWithPortSecurityExt) + +Example to Get a Port with Port Security Information + + var portWithPortSecurityExtensions struct { + ports.Port + portsecurity.PortSecurityExt + } + + portID := "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2" + + err := ports.Get(networkingClient, portID).ExtractInto(&portWithPortSecurityExtensions) + if err != nil { + panic(err) + } + + fmt.Println("%+v\n", portWithPortSecurityExtensions) + +Example to Create a Port Without Port Security + + var portWithPortSecurityExtensions struct { + ports.Port + portsecurity.PortSecurityExt + } + + iFalse := false + networkID := "4e8e5957-649f-477b-9e5b-f1f75b21c03c" + subnetID := "a87cc70a-3e15-4acf-8205-9b711a3531b7" + + portCreateOpts := ports.CreateOpts{ + NetworkID: networkID, + FixedIPs: []ports.IP{ports.IP{SubnetID: subnetID}}, + } + + createOpts := portsecurity.PortCreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + PortSecurityEnabled: &iFalse, + } + + err := ports.Create(networkingClient, createOpts).ExtractInto(&portWithPortSecurityExtensions) + if err != nil { + panic(err) + } + + fmt.Println("%+v\n", portWithPortSecurityExtensions) + +Example to Disable Port Security on an Existing Port + + var portWithPortSecurityExtensions struct { + ports.Port + portsecurity.PortSecurityExt + } + + iFalse := false + portID := "65c0ee9f-d634-4522-8954-51021b570b0d" + + portUpdateOpts := ports.UpdateOpts{} + updateOpts := portsecurity.PortUpdateOptsExt{ + UpdateOptsBuilder: 
portUpdateOpts, + PortSecurityEnabled: &iFalse, + } + + err := ports.Update(networkingClient, portID, updateOpts).ExtractInto(&portWithPortSecurityExtensions) + if err != nil { + panic(err) + } + + fmt.Println("%+v\n", portWithPortSecurityExtensions) +*/ +package portsecurity diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/requests.go new file mode 100644 index 000000000000..c80f47cf612d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/requests.go @@ -0,0 +1,104 @@ +package portsecurity + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" +) + +// PortCreateOptsExt adds port security options to the base ports.CreateOpts. +type PortCreateOptsExt struct { + ports.CreateOptsBuilder + + // PortSecurityEnabled toggles port security on a port. + PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"` +} + +// ToPortCreateMap casts a CreateOpts struct to a map. +func (opts PortCreateOptsExt) ToPortCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToPortCreateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.PortSecurityEnabled != nil { + port["port_security_enabled"] = &opts.PortSecurityEnabled + } + + return base, nil +} + +// PortUpdateOptsExt adds port security options to the base ports.UpdateOpts. +type PortUpdateOptsExt struct { + ports.UpdateOptsBuilder + + // PortSecurityEnabled toggles port security on a port. + PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"` +} + +// ToPortUpdateMap casts a UpdateOpts struct to a map. +func (opts PortUpdateOptsExt) ToPortUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToPortUpdateMap() + if err != nil { + return nil, err + } + + port := base["port"].(map[string]interface{}) + + if opts.PortSecurityEnabled != nil { + port["port_security_enabled"] = &opts.PortSecurityEnabled + } + + return base, nil +} + +// NetworkCreateOptsExt adds port security options to the base +// networks.CreateOpts. +type NetworkCreateOptsExt struct { + networks.CreateOptsBuilder + + // PortSecurityEnabled toggles port security on a port. + PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"` +} + +// ToNetworkCreateMap casts a CreateOpts struct to a map. +func (opts NetworkCreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + network := base["network"].(map[string]interface{}) + + if opts.PortSecurityEnabled != nil { + network["port_security_enabled"] = &opts.PortSecurityEnabled + } + + return base, nil +} + +// NetworkUpdateOptsExt adds port security options to the base +// networks.UpdateOpts. +type NetworkUpdateOptsExt struct { + networks.UpdateOptsBuilder + + // PortSecurityEnabled toggles port security on a port. + PortSecurityEnabled *bool `json:"port_security_enabled,omitempty"` +} + +// ToNetworkUpdateMap casts a UpdateOpts struct to a map. 
+func (opts NetworkUpdateOptsExt) ToNetworkUpdateMap() (map[string]interface{}, error) { + base, err := opts.UpdateOptsBuilder.ToNetworkUpdateMap() + if err != nil { + return nil, err + } + + network := base["network"].(map[string]interface{}) + + if opts.PortSecurityEnabled != nil { + network["port_security_enabled"] = &opts.PortSecurityEnabled + } + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/results.go new file mode 100644 index 000000000000..7b3482a40558 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity/results.go @@ -0,0 +1,7 @@ +package portsecurity + +type PortSecurityExt struct { + // PortSecurityEnabled specifies whether port security is enabled or + // disabled. + PortSecurityEnabled bool `json:"port_security_enabled"` +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go new file mode 100644 index 000000000000..ddc44175a79f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/doc.go @@ -0,0 +1,73 @@ +/* +Package provider gives access to the provider Neutron plugin, allowing +network extended attributes. The provider extended attributes for networks +enable administrative users to specify how network objects map to the +underlying networking infrastructure. These extended attributes also appear +when administrative users query networks. + +For more information about extended attributes, see the NetworkExtAttrs +struct. The actual semantics of these attributes depend on the technology +back end of the particular plug-in. See the plug-in documentation and the +OpenStack Cloud Administrator Guide to understand which values should be +specific for each of these attributes when OpenStack Networking is deployed +with a particular plug-in. The examples shown in this chapter refer to the +Open vSwitch plug-in. + +The default policy settings enable only users with administrative rights to +specify these parameters in requests and to see their values in responses. By +default, the provider network extension attributes are completely hidden from +regular tenants. As a rule of thumb, if these attributes are not visible in a +GET /networks/ operation, this implies the user submitting the +request is not authorized to view or manipulate provider network attributes. 
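Example to Get a Network with its Provider Attributes

This is a minimal sketch; networkClient is assumed to be an already-initialized
Networking v2 client and the network ID is a placeholder.

	var networkWithProvider struct {
		networks.Network
		provider.NetworkProviderExt
	}

	networkID := "d32019d3-bc6e-4319-9c1d-6722fc136a22"

	err := networks.Get(networkClient, networkID).ExtractInto(&networkWithProvider)
	if err != nil {
		panic(err)
	}

	fmt.Printf("%+v\n", networkWithProvider)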
+ +Example to List Networks with Provider Information + + type NetworkWithProvider struct { + networks.Network + provider.NetworkProviderExt + } + + var allNetworks []NetworkWithProvider + + allPages, err := networks.List(networkClient, nil).AllPages() + if err != nil { + panic(err) + } + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v\n", network) + } + +Example to Create a Provider Network + + segments := []provider.Segment{ + provider.Segment{ + NetworkType: "vxlan", + PhysicalNetwork: "br-ex", + SegmentationID: 615, + }, + } + + iTrue := true + networkCreateOpts := networks.CreateOpts{ + Name: "provider-network", + AdminStateUp: &iTrue, + Shared: &iTrue, + } + + createOpts := provider.CreateOptsExt{ + CreateOptsBuilder: networkCreateOpts, + Segments: segments, + } + + network, err := networks.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } +*/ +package provider diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go new file mode 100644 index 000000000000..32c27970a4c7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/requests.go @@ -0,0 +1,28 @@ +package provider + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +// CreateOptsExt adds a Segments option to the base Network CreateOpts. +type CreateOptsExt struct { + networks.CreateOptsBuilder + Segments []Segment `json:"segments,omitempty"` +} + +// ToNetworkCreateMap adds segments to the base network creation options. +func (opts CreateOptsExt) ToNetworkCreateMap() (map[string]interface{}, error) { + base, err := opts.CreateOptsBuilder.ToNetworkCreateMap() + if err != nil { + return nil, err + } + + if opts.Segments == nil { + return base, nil + } + + providerMap := base["network"].(map[string]interface{}) + providerMap["segments"] = opts.Segments + + return base, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go new file mode 100644 index 000000000000..9babd2ab6ed8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/results.go @@ -0,0 +1,62 @@ +package provider + +import ( + "encoding/json" + "strconv" +) + +// NetworkProviderExt represents an extended form of a Network with additional +// fields. +type NetworkProviderExt struct { + // Specifies the nature of the physical network mapped to this network + // resource. Examples are flat, vlan, or gre. + NetworkType string `json:"provider:network_type"` + + // Identifies the physical network on top of which this network object is + // being implemented. The OpenStack Networking API does not expose any + // facility for retrieving the list of available physical networks. As an + // example, in the Open vSwitch plug-in this is a symbolic name which is + // then mapped to specific bridges on each compute host through the Open + // vSwitch plug-in configuration file. + PhysicalNetwork string `json:"provider:physical_network"` + + // Identifies an isolated segment on the physical network; the nature of the + // segment depends on the segmentation model defined by network_type.
For + // instance, if network_type is vlan, then this is a vlan identifier; + // otherwise, if network_type is gre, then this will be a gre key. + SegmentationID string `json:"-"` + + // Segments is an array of Segment which defines multiple physical bindings + // to logical networks. + Segments []Segment `json:"segments"` +} + +// Segment defines a physical binding to a logical network. +type Segment struct { + PhysicalNetwork string `json:"provider:physical_network"` + NetworkType string `json:"provider:network_type"` + SegmentationID int `json:"provider:segmentation_id"` +} + +func (r *NetworkProviderExt) UnmarshalJSON(b []byte) error { + type tmp NetworkProviderExt + var networkProviderExt struct { + tmp + SegmentationID interface{} `json:"provider:segmentation_id"` + } + + if err := json.Unmarshal(b, &networkProviderExt); err != nil { + return err + } + + *r = NetworkProviderExt(networkProviderExt.tmp) + + switch t := networkProviderExt.SegmentationID.(type) { + case float64: + r.SegmentationID = strconv.FormatFloat(t, 'f', -1, 64) + case string: + r.SegmentationID = string(t) + } + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/testing/doc.go new file mode 100644 index 000000000000..25d453926d28 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/testing/doc.go @@ -0,0 +1,2 @@ +// provider unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/testing/results_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/testing/results_test.go new file mode 100644 index 000000000000..709f8306278f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider/testing/results_test.go @@ -0,0 +1,222 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/provider" + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + nettest "github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, nettest.ListResponse) + }) + + type NetworkWithExt struct { + networks.Network + provider.NetworkProviderExt + } + var actual []NetworkWithExt + + allPages, err := networks.List(fake.ServiceClient(), networks.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + err = networks.ExtractNetworksInto(allPages, &actual) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "d32019d3-bc6e-4319-9c1d-6722fc136a22", actual[0].ID) + th.AssertEquals(t, "db193ab3-96e3-4cb3-8fc5-05f4296d0324", actual[1].ID) + th.AssertEquals(t, "local", actual[1].NetworkType) + th.AssertEquals(t, "1234567890", actual[1].SegmentationID) + th.AssertEquals(t, actual[0].Subnets[0], "54d6f61d-db07-451c-9ab3-b9609b6b6f0b") + th.AssertEquals(t, 
actual[1].Subnets[0], "08eae331-0402-425a-923c-34f7cfe39c1b") + +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, nettest.GetResponse) + }) + + var s struct { + networks.Network + provider.NetworkProviderExt + } + + err := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "d32019d3-bc6e-4319-9c1d-6722fc136a22", s.ID) + th.AssertEquals(t, "", s.PhysicalNetwork) + th.AssertEquals(t, "local", s.NetworkType) + th.AssertEquals(t, "9876543210", s.SegmentationID) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, nettest.CreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, nettest.CreateResponse) + }) + + var s struct { + networks.Network + provider.NetworkProviderExt + } + + options := networks.CreateOpts{Name: "private", AdminStateUp: gophercloud.Enabled} + err := networks.Create(fake.ServiceClient(), options).ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "db193ab3-96e3-4cb3-8fc5-05f4296d0324", s.ID) + th.AssertEquals(t, "", s.PhysicalNetwork) + th.AssertEquals(t, "local", s.NetworkType) + th.AssertEquals(t, "9876543210", s.SegmentationID) +} + +func TestCreateWithMultipleProvider(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "network": { + "name": "sample_network", + "admin_state_up": true, + "shared": true, + "tenant_id": "12345", + "segments": [ + { + "provider:segmentation_id": 666, + "provider:physical_network": "br-ex", + "provider:network_type": "vxlan" + }, + { + "provider:segmentation_id": 615, + "provider:physical_network": "br-ex", + "provider:network_type": "vxlan" + } + ] + } +} + `) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` +{ + "network": { + "status": "ACTIVE", + "name": "sample_network", + "admin_state_up": true, + "shared": true, + "tenant_id": "12345", + "segments": [ + { + "provider:segmentation_id": 666, + "provider:physical_network": "br-ex", + "provider:network_type": "vlan" + }, + { + "provider:segmentation_id": 615, + "provider:physical_network": "br-ex", + "provider:network_type": "vlan" + } + ] + } +} + `) + }) + + iTrue := true + segments := []provider.Segment{ + provider.Segment{NetworkType: "vxlan", PhysicalNetwork: "br-ex", SegmentationID: 666}, + provider.Segment{NetworkType: "vxlan", PhysicalNetwork: "br-ex", SegmentationID: 615}, + } + + networkCreateOpts := networks.CreateOpts{ + Name: "sample_network", + AdminStateUp: &iTrue, + Shared: &iTrue, + TenantID: "12345", + } + + 
providerCreateOpts := provider.CreateOptsExt{ + CreateOptsBuilder: networkCreateOpts, + Segments: segments, + } + + _, err := networks.Create(fake.ServiceClient(), providerCreateOpts).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, nettest.UpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, nettest.UpdateResponse) + }) + + var s struct { + networks.Network + provider.NetworkProviderExt + } + + iTrue := true + options := networks.UpdateOpts{Name: "new_network_name", AdminStateUp: gophercloud.Disabled, Shared: &iTrue} + err := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, "4e8e5957-649f-477b-9e5b-f1f75b21c03c", s.ID) + th.AssertEquals(t, "", s.PhysicalNetwork) + th.AssertEquals(t, "local", s.NetworkType) + th.AssertEquals(t, "1234567890", s.SegmentationID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/doc.go new file mode 100644 index 000000000000..5cfc6884c1d4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/doc.go @@ -0,0 +1,19 @@ +/* +Package ruletypes contains functionality for working with Neutron 'quality of service' rule-type resources. 
+ +Example: You can list rule-types in the following way: + + page, err := ruletypes.ListRuleTypes(client).AllPages() + if err != nil { + return + } + + rules, err := ruletypes.ExtractRuleTypes(page) + if err != nil { + return + } + + fmt.Printf("%v <- Rule Types\n", rules) + +*/ +package ruletypes diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/requests.go new file mode 100644 index 000000000000..e193cea5a262 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/requests.go @@ -0,0 +1,13 @@ +package ruletypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListRuleTypes returns the list of rule types from the server +func ListRuleTypes(c *gophercloud.ServiceClient) (result pagination.Pager) { + return pagination.NewPager(c, listRuleTypesURL(c), func(r pagination.PageResult) pagination.Page { + return ListRuleTypesPage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/results.go new file mode 100644 index 000000000000..f62818819ead --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/results.go @@ -0,0 +1,26 @@ +package ruletypes + +import "github.com/gophercloud/gophercloud/pagination" + +// The result of listing the qos rule types +type RuleType struct { + Type string `json:"type"` +} + +type ListRuleTypesPage struct { + pagination.SinglePageBase +} + +func (r ListRuleTypesPage) IsEmpty() (bool, error) { + v, err := ExtractRuleTypes(r) + return len(v) == 0, err +} + +func ExtractRuleTypes(r pagination.Page) ([]RuleType, error) { + var s struct { + RuleTypes []RuleType `json:"rule_types"` + } + + err := (r.(ListRuleTypesPage)).ExtractInto(&s) + return s.RuleTypes, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/doc.go new file mode 100644 index 000000000000..1e9c71e8fb77 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/doc.go @@ -0,0 +1,2 @@ +// qos unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/fixtures.go new file mode 100644 index 000000000000..63556695d6a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/fixtures.go @@ -0,0 +1,19 @@ +package testing + +const ( + ListRuleTypesResponse = ` +{ + "rule_types": [ + { + "type": "bandwidth_limit" + }, + { + "type": "dscp_marking" + }, + { + "type": "minimum_bandwidth" + } + ] +} +` +) diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/requests_test.go new file mode 100644 index 000000000000..79559efe1ab5 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/testing/requests_test.go @@ -0,0 +1,41 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListRuleTypes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprint(w, ListRuleTypesResponse) + }) + + page, err := ruletypes.ListRuleTypes(fake.ServiceClient()).AllPages() + if err != nil { + t.Errorf("Failed to list rule types pages: %v", err) + return + } + + rules, err := ruletypes.ExtractRuleTypes(page) + if err != nil { + t.Errorf("Failed to list rule types: %v", err) + return + } + + expected := []ruletypes.RuleType{{Type: "bandwidth_limit"}, {Type: "dscp_marking"}, {Type: "minimum_bandwidth"}} + th.AssertDeepEquals(t, expected, rules) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/urls.go new file mode 100644 index 000000000000..15795100c2cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/qos/ruletypes/urls.go @@ -0,0 +1,7 @@ +package ruletypes + +import "github.com/gophercloud/gophercloud" + +func listRuleTypesURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("qos", "rule-types") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/doc.go new file mode 100644 index 000000000000..f0ddbc0f670a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/doc.go @@ -0,0 +1,79 @@ +/* +Package rbacpolicies contains functionality for working with Neutron RBAC Policies. +Role-Based Access Control (RBAC) policy framework enables both operators +and users to grant access to resources for specific projects. + +Sharing an object with a specific project is accomplished by creating a +policy entry that permits the target project the access_as_shared action +on that object. + +To make a network available as an external network for specific projects +rather than all projects, use the access_as_external action. +If a network is marked as external during creation, it now implicitly creates +a wildcard RBAC policy granting everyone access to preserve previous behavior +before this feature was added. 
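Example to List RBAC Policies by Action

This is a minimal sketch; rbacClient is assumed to be an already-initialized
Networking v2 client.

	listOpts := rbacpolicies.ListOpts{
		Action: rbacpolicies.ActionAccessExternal,
	}

	allPages, err := rbacpolicies.List(rbacClient, listOpts).AllPages()
	if err != nil {
		panic(err)
	}

	allRBACPolicies, err := rbacpolicies.ExtractRBACPolicies(allPages)
	if err != nil {
		panic(err)
	}

	for _, rbacpolicy := range allRBACPolicies {
		fmt.Printf("%+v", rbacpolicy)
	}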
+ +Example to Create a RBAC Policy + + createOpts := rbacpolicies.CreateOpts{ + Action: rbacpolicies.ActionAccessShared, + ObjectType: "network", + TargetTenant: "6e547a3bcfe44702889fdeff3c3520c3", + ObjectID: "240d22bf-bd17-4238-9758-25f72610ecdc", + } + + rbacPolicy, err := rbacpolicies.Create(rbacClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to List RBAC Policies + + listOpts := rbacpolicies.ListOpts{ + TenantID: "a99e9b4e620e4db09a2dfb6e42a01e66", + } + + allPages, err := rbacpolicies.List(rbacClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRBACPolicies, err := rbacpolicies.ExtractRBACPolicies(allPages) + if err != nil { + panic(err) + } + + for _, rbacpolicy := range allRBACPolicies { + fmt.Printf("%+v", rbacpolicy) + } + +Example to Delete a RBAC Policy + + rbacPolicyID := "94fe107f-da78-4d92-a9d7-5611b06dad8d" + err := rbacpolicies.Delete(rbacClient, rbacPolicyID).ExtractErr() + if err != nil { + panic(err) + } + +Example to Get RBAC Policy by ID + + rbacPolicyID := "94fe107f-da78-4d92-a9d7-5611b06dad8d" + rbacpolicy, err := rbacpolicies.Get(rbacClient, rbacPolicyID).Extract() + if err != nil { + panic(err) + } + fmt.Printf("%+v", rbacpolicy) + +Example to Update a RBAC Policy + + rbacPolicyID := "570b0306-afb5-4d3b-ab47-458fdc16baaa" + updateOpts := rbacpolicies.UpdateOpts{ + TargetTenant: "9d766060b6354c9e8e2da44cab0e8f38", + } + rbacPolicy, err := rbacpolicies.Update(rbacClient, rbacPolicyID, updateOpts).Extract() + if err != nil { + panic(err) + } + +*/ +package rbacpolicies diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/requests.go new file mode 100644 index 000000000000..532a2f23d2c2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/requests.go @@ -0,0 +1,142 @@ +package rbacpolicies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToRBACPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the rbac attributes you want to see returned. SortKey allows you to sort +// by a particular rbac attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + TargetTenant string `q:"target_tenant"` + ObjectType string `q:"object_type"` + ObjectID string `q:"object_id"` + Action PolicyAction `q:"action"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToRBACPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToRBACPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// rbac policies. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToRBACPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return RBACPolicyPage{pagination.LinkedPageBase{PageResult: r}} + + }) +} + +// Get retrieves a specific rbac policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// PolicyAction maps to Action for the RBAC policy. +// Which allows access_as_external or access_as_shared. +type PolicyAction string + +const ( + // ActionAccessExternal returns Action for the RBAC policy as access_as_external. + ActionAccessExternal PolicyAction = "access_as_external" + + // ActionAccessShared returns Action for the RBAC policy as access_as_shared. + ActionAccessShared PolicyAction = "access_as_shared" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToRBACPolicyCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a rbac-policy. +type CreateOpts struct { + Action PolicyAction `json:"action" required:"true"` + ObjectType string `json:"object_type" required:"true"` + TargetTenant string `json:"target_tenant" required:"true"` + ObjectID string `json:"object_id" required:"true"` +} + +// ToRBACPolicyCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToRBACPolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rbac_policy") +} + +// Create accepts a CreateOpts struct and creates a new rbac-policy using the values +// provided. +// +// The tenant ID that is contained in the URI is the tenant that creates the +// rbac-policy. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToRBACPolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// Delete accepts a unique ID and deletes the rbac-policy associated with it. +func Delete(c *gophercloud.ServiceClient, rbacPolicyID string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, rbacPolicyID), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToRBACPolicyUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options used to update a rbac-policy. +type UpdateOpts struct { + TargetTenant string `json:"target_tenant" required:"true"` +} + +// ToRBACPolicyUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToRBACPolicyUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "rbac_policy") +} + +// Update accepts a UpdateOpts struct and updates an existing rbac-policy using the +// values provided. 
+func Update(c *gophercloud.ServiceClient, rbacPolicyID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToRBACPolicyUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, rbacPolicyID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/results.go new file mode 100644 index 000000000000..a62facc0dc7d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/results.go @@ -0,0 +1,98 @@ +package rbacpolicies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts RBAC Policy resource. +func (r commonResult) Extract() (*RBACPolicy, error) { + var s RBACPolicy + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "rbac_policy") +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a RBAC Policy. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a RBAC Policy. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a RBAC Policy. +type UpdateResult struct { + commonResult +} + +// RBACPolicy represents a RBAC policy. +type RBACPolicy struct { + // UUID of the RBAC policy. + ID string `json:"id"` + + // Action for the RBAC policy which is access_as_external or access_as_shared. + Action PolicyAction `json:"action"` + + // ObjectID is the ID of the object_type resource. + // An object_type of network returns a network ID and + // object_type of qos-policy returns a QoS ID. + ObjectID string `json:"object_id"` + + // ObjectType is the type of the object that the RBAC policy affects. + // Types include qos-policy or network. + ObjectType string `json:"object_type"` + + // TenantID is the ID of the project that owns the resource. + TenantID string `json:"tenant_id"` + + // TargetTenant is the ID of the tenant to which the RBAC policy will be enforced. + TargetTenant string `json:"target_tenant"` + + // ProjectID is the ID of the project. + ProjectID string `json:"project_id"` +} + +// RBACPolicyPage is the page returned by a pager when traversing over a +// collection of rbac policies. +type RBACPolicyPage struct { + pagination.LinkedPageBase +} + +// IsEmpty checks whether a RBACPolicyPage struct is empty. +func (r RBACPolicyPage) IsEmpty() (bool, error) { + is, err := ExtractRBACPolicies(r) + return len(is) == 0, err +} + +// ExtractRBACPolicies accepts a Page struct, specifically a RBAC Policy struct, +// and extracts the elements into a slice of RBAC Policy structs. In other words, +// a generic collection is mapped into a relevant slice. 
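//
// A minimal sketch of page-by-page extraction (client is assumed to be an
// already-initialized Networking v2 *gophercloud.ServiceClient):
//
//	err := rbacpolicies.List(client, rbacpolicies.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) {
//		policies, err := rbacpolicies.ExtractRBACPolicies(page)
//		if err != nil {
//			return false, err
//		}
//		for _, policy := range policies {
//			fmt.Printf("%+v\n", policy)
//		}
//		return true, nil
//	})
//	if err != nil {
//		panic(err)
//	}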
+func ExtractRBACPolicies(r pagination.Page) ([]RBACPolicy, error) { + var s []RBACPolicy + err := ExtractRBACPolicesInto(r, &s) + return s, err +} + +// ExtractRBACPolicesInto extracts the elements into a slice of RBAC Policy structs. +func ExtractRBACPolicesInto(r pagination.Page, v interface{}) error { + return r.(RBACPolicyPage).Result.ExtractIntoSlicePtr(v, "rbac_policies") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/doc.go new file mode 100644 index 000000000000..e95610ae4f87 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/doc.go @@ -0,0 +1,2 @@ +// Package testing includes rbac unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/fixtures.go new file mode 100644 index 000000000000..f63fa2b89fc2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/fixtures.go @@ -0,0 +1,112 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies" +) + +const ListResponse = ` +{ + "rbac_policies": [ + { + "target_tenant": "6e547a3bcfe44702889fdeff3c3520c3", + "tenant_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "object_type": "network", + "object_id": "240d22bf-bd17-4238-9758-25f72610ecdc", + "action": "access_as_shared", + "project_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "id": "2cf7523a-93b5-4e69-9360-6c6bf986bb7c" + }, + { + "target_tenant": "1a547a3bcfe44702889fdeff3c3520c3", + "tenant_id": "1ae27ce0a2a54cc6ae06dc62dd0ec832", + "object_type": "network", + "object_id": "120d22bf-bd17-4238-9758-25f72610ecdc", + "action": "access_as_shared", + "project_id": "1ae27ce0a2a54cc6ae06dc62dd0ec832", + "id":"1ab7523a-93b5-4e69-9360-6c6bf986bb7c" + } + ] +}` + +// CreateRequest is the structure of request body to create rbac-policy. +const CreateRequest = ` +{ + "rbac_policy": { + "action": "access_as_shared", + "object_type": "network", + "target_tenant": "6e547a3bcfe44702889fdeff3c3520c3", + "object_id": "240d22bf-bd17-4238-9758-25f72610ecdc" + } +}` + +// CreateResponse is the structure of response body of rbac-policy create. +const CreateResponse = ` +{ + "rbac_policy": { + "target_tenant": "6e547a3bcfe44702889fdeff3c3520c3", + "tenant_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "object_type": "network", + "object_id": "240d22bf-bd17-4238-9758-25f72610ecdc", + "action": "access_as_shared", + "project_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "id": "2cf7523a-93b5-4e69-9360-6c6bf986bb7c" + } +}` + +// GetResponse is the structure of the response body of rbac-policy get operation. +const GetResponse = ` +{ + "rbac_policy": { + "target_tenant": "6e547a3bcfe44702889fdeff3c3520c3", + "tenant_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "object_type": "network", + "object_id": "240d22bf-bd17-4238-9758-25f72610ecdc", + "action": "access_as_shared", + "project_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "id": "2cf7523a-93b5-4e69-9360-6c6bf986bb7c" + } +}` + +// UpdateRequest is the structure of request body to update rbac-policy. 
+const UpdateRequest = ` +{ + "rbac_policy": { + "target_tenant": "9d766060b6354c9e8e2da44cab0e8f38" + } +}` + +// UpdateResponse is the structure of response body of rbac-policy update. +const UpdateResponse = ` +{ + "rbac_policy": { + "target_tenant": "9d766060b6354c9e8e2da44cab0e8f38", + "tenant_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "object_type": "network", + "object_id": "240d22bf-bd17-4238-9758-25f72610ecdc", + "action": "access_as_shared", + "project_id": "3de27ce0a2a54cc6ae06dc62dd0ec832", + "id": "2cf7523a-93b5-4e69-9360-6c6bf986bb7c" + } +}` + +var rbacPolicy1 = rbacpolicies.RBACPolicy{ + ID: "2cf7523a-93b5-4e69-9360-6c6bf986bb7c", + Action: rbacpolicies.ActionAccessShared, + ObjectID: "240d22bf-bd17-4238-9758-25f72610ecdc", + ObjectType: "network", + TenantID: "3de27ce0a2a54cc6ae06dc62dd0ec832", + TargetTenant: "6e547a3bcfe44702889fdeff3c3520c3", + ProjectID: "3de27ce0a2a54cc6ae06dc62dd0ec832", +} + +var rbacPolicy2 = rbacpolicies.RBACPolicy{ + ID: "1ab7523a-93b5-4e69-9360-6c6bf986bb7c", + Action: rbacpolicies.ActionAccessShared, + ObjectID: "120d22bf-bd17-4238-9758-25f72610ecdc", + ObjectType: "network", + TenantID: "1ae27ce0a2a54cc6ae06dc62dd0ec832", + TargetTenant: "1a547a3bcfe44702889fdeff3c3520c3", + ProjectID: "1ae27ce0a2a54cc6ae06dc62dd0ec832", +} + +var ExpectedRBACPoliciesSlice = []rbacpolicies.RBACPolicy{rbacPolicy1, rbacPolicy2} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/requests_test.go new file mode 100644 index 000000000000..8aad843459e8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/testing/requests_test.go @@ -0,0 +1,169 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/rbac-policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateResponse) + }) + + options := rbacpolicies.CreateOpts{ + Action: rbacpolicies.ActionAccessShared, + ObjectType: "network", + TargetTenant: "6e547a3bcfe44702889fdeff3c3520c3", + ObjectID: "240d22bf-bd17-4238-9758-25f72610ecdc", + } + rbacResult, err := rbacpolicies.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, &rbacPolicy1, rbacResult) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/rbac-policies/2cf7523a-93b5-4e69-9360-6c6bf986bb7c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse) + }) + + n, err := rbacpolicies.Get(fake.ServiceClient(), 
"2cf7523a-93b5-4e69-9360-6c6bf986bb7c").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &rbacPolicy1, n) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/rbac-policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) + + client := fake.ServiceClient() + count := 0 + + rbacpolicies.List(client, rbacpolicies.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := rbacpolicies.ExtractRBACPolicies(page) + if err != nil { + t.Errorf("Failed to extract rbac policies: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedRBACPoliciesSlice, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestListWithAllPages(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/rbac-policies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) + + client := fake.ServiceClient() + + type newRBACPolicy struct { + rbacpolicies.RBACPolicy + } + + var allRBACpolicies []newRBACPolicy + + allPages, err := rbacpolicies.List(client, rbacpolicies.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + err = rbacpolicies.ExtractRBACPolicesInto(allPages, &allRBACpolicies) + th.AssertNoErr(t, err) + + th.AssertEquals(t, allRBACpolicies[0].ObjectType, "network") + th.AssertEquals(t, allRBACpolicies[0].Action, rbacpolicies.ActionAccessShared) + + th.AssertEquals(t, allRBACpolicies[1].ProjectID, "1ae27ce0a2a54cc6ae06dc62dd0ec832") + th.AssertEquals(t, allRBACpolicies[1].TargetTenant, "1a547a3bcfe44702889fdeff3c3520c3") + +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/rbac-policies/71d55b18-d2f8-4c76-a5e6-e0a3dd114361", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := rbacpolicies.Delete(fake.ServiceClient(), "71d55b18-d2f8-4c76-a5e6-e0a3dd114361").ExtractErr() + th.AssertNoErr(t, res) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/rbac-policies/2cf7523a-93b5-4e69-9360-6c6bf986bb7c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdateResponse) + }) + + options := rbacpolicies.UpdateOpts{TargetTenant: "9d766060b6354c9e8e2da44cab0e8f38"} + rbacResult, err := rbacpolicies.Update(fake.ServiceClient(), "2cf7523a-93b5-4e69-9360-6c6bf986bb7c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, rbacResult.TargetTenant, "9d766060b6354c9e8e2da44cab0e8f38") + th.AssertEquals(t, rbacResult.ID, "2cf7523a-93b5-4e69-9360-6c6bf986bb7c") +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/urls.go new file mode 100644 index 000000000000..8beeed9a5fa3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/rbacpolicies/urls.go @@ -0,0 +1,31 @@ +package rbacpolicies + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("rbac-policies", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("rbac-policies") +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/doc.go new file mode 100644 index 000000000000..31f744ccd7a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/doc.go @@ -0,0 +1,32 @@ +// Package security contains functionality to work with security group and +// security group rules Neutron resources. +// +// Security groups and security group rules allow administrators and tenants +// to specify the type of traffic and direction (ingress/egress) +// that is allowed to pass through a port. A security group is a container for +// security group rules. +// +// When a port is created in Networking it is associated with a security group. +// If a security group is not specified the port is associated with a 'default' +// security group. By default, this group drops all ingress traffic and allows +// all egress. Rules can be added to this group in order to change the behaviour. +// +// The basic characteristics of Neutron Security Groups are: +// +// For ingress traffic (to an instance) +// - Only traffic matched with security group rules is allowed. +// - When there is no rule defined, all traffic is dropped. +// +// For egress traffic (from an instance) +// - Only traffic matched with security group rules is allowed. +// - When there is no rule defined, all egress traffic is dropped. +// - When a new security group is created, rules to allow all egress traffic +// are automatically added. +// +// A "default security group" is defined for each tenant. +// - For the default security group a rule which allows intercommunication +// among hosts associated with the default security group is defined by default. +// - As a result, all egress traffic and intercommunication in the default +// group are allowed and all ingress from outside of the default group is +// dropped by default (in the default security group). 
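The security package itself carries only this overview; the groups and rules sub-packages vendored below hold the actual request helpers. As a rough sketch (not part of the vendored sources), creating a group and attaching an ingress rule with those two packages could look like the following, assuming an authenticated Neutron v2 networkClient obtained elsewhere:

    // Create a container group for the rules.
    group, err := groups.Create(networkClient, groups.CreateOpts{
        Name:        "webservers",
        Description: "security group for webservers",
    }).Extract()
    if err != nil {
        panic(err)
    }

    // Allow inbound HTTP from anywhere by attaching an ingress rule to the new group.
    _, err = rules.Create(networkClient, rules.CreateOpts{
        Direction:      rules.DirIngress,
        EtherType:      rules.EtherType4,
        Protocol:       rules.ProtocolTCP,
        PortRangeMin:   80,
        PortRangeMax:   80,
        RemoteIPPrefix: "0.0.0.0/0",
        SecGroupID:     group.ID,
    }).Extract()
    if err != nil {
        panic(err)
    }

Tearing the group down again goes through groups.Delete(networkClient, group.ID).ExtractErr(), mirroring the Delete helpers in the requests files below.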
+package security diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go new file mode 100644 index 000000000000..7d8bbcaacbad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/doc.go @@ -0,0 +1,58 @@ +/* +Package groups provides information and interaction with Security Groups +for the OpenStack Networking service. + +Example to List Security Groups + + listOpts := groups.ListOpts{ + TenantID: "966b3c7d36a24facaf20b7e458bf2192", + } + + allPages, err := groups.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := groups.ExtractGroups(allPages) + if err != nil { + panic(err) + } + + for _, group := range allGroups { + fmt.Printf("%+v\n", group) + } + +Example to Create a Security Group + + createOpts := groups.CreateOpts{ + Name: "group_name", + Description: "A Security Group", + } + + group, err := groups.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Security Group + + groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + + updateOpts := groups.UpdateOpts{ + Name: "new_name", + } + + group, err := groups.Update(networkClient, groupID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Security Group + + groupID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := groups.Delete(networkClient, groupID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package groups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go new file mode 100644 index 000000000000..2a46ce6aa0a8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/requests.go @@ -0,0 +1,161 @@ +package groups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the group attributes you want to see returned. SortKey allows you to +// sort by a particular network attribute. SortDir sets the direction, and is +// either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// security groups. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return SecGroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. 
+type CreateOptsBuilder interface { + ToSecGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new security group. +type CreateOpts struct { + // Human-readable name for the Security Group. Does not have to be unique. + Name string `json:"name" required:"true"` + + // TenantID is the UUID of the project who owns the Group. + // Only administrative users can specify a tenant UUID other than their own. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the UUID of the project who owns the Group. + // Only administrative users can specify a tenant UUID other than their own. + ProjectID string `json:"project_id,omitempty"` + + // Describes the security group. + Description string `json:"description,omitempty"` +} + +// ToSecGroupCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Create is an operation which provisions a new security group with default +// security group rules for the IPv4 and IPv6 ether types. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecGroupUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contains all the values needed to update an existing security +// group. +type UpdateOpts struct { + // Human-readable name for the Security Group. Does not have to be unique. + Name string `json:"name,omitempty"` + + // Describes the security group. + Description string `json:"description,omitempty"` +} + +// ToSecGroupUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSecGroupUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group") +} + +// Update is an operation which updates an existing security group. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecGroupUpdateMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Get retrieves a particular security group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular security group based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a security group's ID, +// given its name. 
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractGroups(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "security group"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "security group"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go new file mode 100644 index 000000000000..66915e6e559e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/results.go @@ -0,0 +1,105 @@ +package groups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecGroup represents a container for security group rules. +type SecGroup struct { + // The UUID for the security group. + ID string + + // Human-readable name for the security group. Might not be unique. + // Cannot be named "default" as that is automatically created for a tenant. + Name string + + // The security group description. + Description string + + // A slice of security group rules that dictate the permitted behaviour for + // traffic entering and leaving the group. + Rules []rules.SecGroupRule `json:"security_group_rules"` + + // TenantID is the project owner of the security group. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the security group. + ProjectID string `json:"project_id"` +} + +// SecGroupPage is the page returned by a pager when traversing over a +// collection of security groups. +type SecGroupPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of security groups has +// reached the end of a page and the pager seeks to traverse over a new one. In +// order to do this, it needs to construct the next page's URL. +func (r SecGroupPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"security_groups_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SecGroupPage struct is empty. +func (r SecGroupPage) IsEmpty() (bool, error) { + is, err := ExtractGroups(r) + return len(is) == 0, err +} + +// ExtractGroups accepts a Page struct, specifically a SecGroupPage struct, +// and extracts the elements into a slice of SecGroup structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractGroups(r pagination.Page) ([]SecGroup, error) { + var s struct { + SecGroups []SecGroup `json:"security_groups"` + } + err := (r.(SecGroupPage)).ExtractInto(&s) + return s.SecGroups, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a security group. 
+func (r commonResult) Extract() (*SecGroup, error) { + var s struct { + SecGroup *SecGroup `json:"security_group"` + } + err := r.ExtractInto(&s) + return s.SecGroup, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a SecGroup. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a SecGroup. +type UpdateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a SecGroup. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/doc.go new file mode 100644 index 000000000000..794dee5b114b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/doc.go @@ -0,0 +1,2 @@ +// groups unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/fixtures.go new file mode 100644 index 000000000000..9e4a931fe418 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/fixtures.go @@ -0,0 +1,156 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" +) + +const SecurityGroupListResponse = ` +{ + "security_groups": [ + { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] +} +` + +var SecurityGroup1 = groups.SecGroup{ + Description: "default", + ID: "85cc3048-abc3-43cc-89b3-377341426ac5", + Name: "default", + Rules: []rules.SecGroupRule{}, + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", +} + +const SecurityGroupCreateRequest = ` +{ + "security_group": { + "name": "new-webservers", + "description": "security group for webservers" + } +} +` + +const SecurityGroupCreateResponse = ` +{ + "security_group": { + "description": "security group for webservers", + "id": "2076db17-a522-4506-91de-c6dd8e837028", + "name": "new-webservers", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv4", + "id": "38ce2d8e-e8f1-48bd-83c2-d33cb9f50c3d", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv6", + "id": "565b9502-12de-4ffd-91e9-68885cff6ae1", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": 
"e4f50856753b4dc6afee5fa6b9b6c550" + } +} +` + +const SecurityGroupUpdateRequest = ` +{ + "security_group": { + "name": "newer-webservers" + } +} +` + +const SecurityGroupUpdateResponse = ` +{ + "security_group": { + "description": "security group for webservers", + "id": "2076db17-a522-4506-91de-c6dd8e837028", + "name": "newer-webservers", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv4", + "id": "38ce2d8e-e8f1-48bd-83c2-d33cb9f50c3d", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv6", + "id": "565b9502-12de-4ffd-91e9-68885cff6ae1", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "2076db17-a522-4506-91de-c6dd8e837028", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} +` + +const SecurityGroupGetResponse = ` +{ + "security_group": { + "description": "default", + "id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "name": "default", + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ], + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/requests_test.go new file mode 100644 index 000000000000..5bb8c70c5bcb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/testing/requests_test.go @@ -0,0 +1,134 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SecurityGroupListResponse) + }) + + count := 0 + + groups.List(fake.ServiceClient(), groups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := groups.ExtractGroups(page) + if err != nil { + t.Errorf("Failed to extract secgroups: %v", err) + return false, err + } + + expected := 
[]groups.SecGroup{SecurityGroup1} + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SecurityGroupCreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SecurityGroupCreateResponse) + }) + + opts := groups.CreateOpts{Name: "new-webservers", Description: "security group for webservers"} + _, err := groups.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/2076db17-a522-4506-91de-c6dd8e837028", + func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SecurityGroupUpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SecurityGroupUpdateResponse) + }) + + opts := groups.UpdateOpts{Name: "newer-webservers"} + sg, err := groups.Update(fake.ServiceClient(), "2076db17-a522-4506-91de-c6dd8e837028", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "newer-webservers", sg.Name) + th.AssertEquals(t, "security group for webservers", sg.Description) + th.AssertEquals(t, "2076db17-a522-4506-91de-c6dd8e837028", sg.ID) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/85cc3048-abc3-43cc-89b3-377341426ac5", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SecurityGroupGetResponse) + }) + + sg, err := groups.Get(fake.ServiceClient(), "85cc3048-abc3-43cc-89b3-377341426ac5").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "default", sg.Description) + th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sg.ID) + th.AssertEquals(t, "default", sg.Name) + th.AssertEquals(t, 2, len(sg.Rules)) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sg.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-groups/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := groups.Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go new file mode 100644 index 000000000000..104cbcc558dc --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/groups/urls.go @@ -0,0 +1,13 @@ +package groups + +import "github.com/gophercloud/gophercloud" + +const rootPath = "security-groups" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go new file mode 100644 index 000000000000..bf66dc8b40e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/doc.go @@ -0,0 +1,50 @@ +/* +Package rules provides information and interaction with Security Group Rules +for the OpenStack Networking service. + +Example to List Security Groups Rules + + listOpts := rules.ListOpts{ + Protocol: "tcp", + } + + allPages, err := rules.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allRules, err := rules.ExtractRules(allPages) + if err != nil { + panic(err) + } + + for _, rule := range allRules { + fmt.Printf("%+v\n", rule) + } + +Example to Create a Security Group Rule + + createOpts := rules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: rules.EtherType4, + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + + rule, err := rules.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Security Group Rule + + ruleID := "37d94f8a-d136-465c-ae46-144f0d8ef141" + err := rules.Delete(networkClient, ruleID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package rules diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go new file mode 100644 index 000000000000..96cce2817da2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/requests.go @@ -0,0 +1,155 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the security group rule attributes you want to see returned. SortKey allows +// you to sort by a particular network attribute. SortDir sets the direction, +// and is either `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Direction string `q:"direction"` + EtherType string `q:"ethertype"` + ID string `q:"id"` + PortRangeMax int `q:"port_range_max"` + PortRangeMin int `q:"port_range_min"` + Protocol string `q:"protocol"` + RemoteGroupID string `q:"remote_group_id"` + RemoteIPPrefix string `q:"remote_ip_prefix"` + SecGroupID string `q:"security_group_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// List returns a Pager which allows you to iterate over a collection of +// security group rules. 
It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOpts) pagination.Pager { + q, err := gophercloud.BuildQueryString(&opts) + if err != nil { + return pagination.Pager{Err: err} + } + u := rootURL(c) + q.String() + return pagination.NewPager(c, u, func(r pagination.PageResult) pagination.Page { + return SecGroupRulePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +type RuleDirection string +type RuleProtocol string +type RuleEtherType string + +// Constants useful for CreateOpts +const ( + DirIngress RuleDirection = "ingress" + DirEgress RuleDirection = "egress" + EtherType4 RuleEtherType = "IPv4" + EtherType6 RuleEtherType = "IPv6" + ProtocolAH RuleProtocol = "ah" + ProtocolDCCP RuleProtocol = "dccp" + ProtocolEGP RuleProtocol = "egp" + ProtocolESP RuleProtocol = "esp" + ProtocolGRE RuleProtocol = "gre" + ProtocolICMP RuleProtocol = "icmp" + ProtocolIGMP RuleProtocol = "igmp" + ProtocolIPv6Encap RuleProtocol = "ipv6-encap" + ProtocolIPv6Frag RuleProtocol = "ipv6-frag" + ProtocolIPv6ICMP RuleProtocol = "ipv6-icmp" + ProtocolIPv6NoNxt RuleProtocol = "ipv6-nonxt" + ProtocolIPv6Opts RuleProtocol = "ipv6-opts" + ProtocolIPv6Route RuleProtocol = "ipv6-route" + ProtocolOSPF RuleProtocol = "ospf" + ProtocolPGM RuleProtocol = "pgm" + ProtocolRSVP RuleProtocol = "rsvp" + ProtocolSCTP RuleProtocol = "sctp" + ProtocolTCP RuleProtocol = "tcp" + ProtocolUDP RuleProtocol = "udp" + ProtocolUDPLite RuleProtocol = "udplite" + ProtocolVRRP RuleProtocol = "vrrp" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecGroupRuleCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new security group +// rule. +type CreateOpts struct { + // Must be either "ingress" or "egress": the direction in which the security + // group rule is applied. + Direction RuleDirection `json:"direction" required:"true"` + + // Must be "IPv4" or "IPv6", and addresses represented in CIDR must match the + // ingress or egress rules. + EtherType RuleEtherType `json:"ethertype" required:"true"` + + // The security group ID to associate with this security group rule. + SecGroupID string `json:"security_group_id" required:"true"` + + // The maximum port number in the range that is matched by the security group + // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If + // the protocol is ICMP, this value must be an ICMP type. + PortRangeMax int `json:"port_range_max,omitempty"` + + // The minimum port number in the range that is matched by the security group + // rule. If the protocol is TCP or UDP, this value must be less than or equal + // to the value of the PortRangeMax attribute. If the protocol is ICMP, this + // value must be an ICMP type. + PortRangeMin int `json:"port_range_min,omitempty"` + + // The protocol that is matched by the security group rule. Valid values are + // "tcp", "udp", "icmp" or an empty string. + Protocol RuleProtocol `json:"protocol,omitempty"` + + // The remote group ID to be associated with this security group rule. You can + // specify either RemoteGroupID or RemoteIPPrefix. + RemoteGroupID string `json:"remote_group_id,omitempty"` + + // The remote IP prefix to be associated with this security group rule. You can + // specify either RemoteGroupID or RemoteIPPrefix. 
This attribute matches the + // specified IP prefix as the source IP address of the IP packet. + RemoteIPPrefix string `json:"remote_ip_prefix,omitempty"` + + // TenantID is the UUID of the project who owns the Rule. + // Only administrative users can specify a project UUID other than their own. + ProjectID string `json:"project_id,omitempty"` +} + +// ToSecGroupRuleCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToSecGroupRuleCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_group_rule") +} + +// Create is an operation which adds a new security group rule and associates it +// with an existing security group (whose ID is specified in CreateOpts). +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecGroupRuleCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular security group rule based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular security group rule based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go new file mode 100644 index 000000000000..377e753140c4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/results.go @@ -0,0 +1,124 @@ +package rules + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecGroupRule represents a rule to dictate the behaviour of incoming or +// outgoing traffic for a particular security group. +type SecGroupRule struct { + // The UUID for this security group rule. + ID string + + // The direction in which the security group rule is applied. The only values + // allowed are "ingress" or "egress". For a compute instance, an ingress + // security group rule is applied to incoming (ingress) traffic for that + // instance. An egress rule is applied to traffic leaving the instance. + Direction string + + // Must be IPv4 or IPv6, and addresses represented in CIDR must match the + // ingress or egress rules. + EtherType string `json:"ethertype"` + + // The security group ID to associate with this security group rule. + SecGroupID string `json:"security_group_id"` + + // The minimum port number in the range that is matched by the security group + // rule. If the protocol is TCP or UDP, this value must be less than or equal + // to the value of the PortRangeMax attribute. If the protocol is ICMP, this + // value must be an ICMP type. + PortRangeMin int `json:"port_range_min"` + + // The maximum port number in the range that is matched by the security group + // rule. The PortRangeMin attribute constrains the PortRangeMax attribute. If + // the protocol is ICMP, this value must be an ICMP type. + PortRangeMax int `json:"port_range_max"` + + // The protocol that is matched by the security group rule. Valid values are + // "tcp", "udp", "icmp" or an empty string. + Protocol string + + // The remote group ID to be associated with this security group rule. 
You + // can specify either RemoteGroupID or RemoteIPPrefix. + RemoteGroupID string `json:"remote_group_id"` + + // The remote IP prefix to be associated with this security group rule. You + // can specify either RemoteGroupID or RemoteIPPrefix . This attribute + // matches the specified IP prefix as the source IP address of the IP packet. + RemoteIPPrefix string `json:"remote_ip_prefix"` + + // TenantID is the project owner of this security group rule. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of this security group rule. + ProjectID string `json:"project_id"` +} + +// SecGroupRulePage is the page returned by a pager when traversing over a +// collection of security group rules. +type SecGroupRulePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of security group rules has +// reached the end of a page and the pager seeks to traverse over a new one. In +// order to do this, it needs to construct the next page's URL. +func (r SecGroupRulePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"security_group_rules_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SecGroupRulePage struct is empty. +func (r SecGroupRulePage) IsEmpty() (bool, error) { + is, err := ExtractRules(r) + return len(is) == 0, err +} + +// ExtractRules accepts a Page struct, specifically a SecGroupRulePage struct, +// and extracts the elements into a slice of SecGroupRule structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractRules(r pagination.Page) ([]SecGroupRule, error) { + var s struct { + SecGroupRules []SecGroupRule `json:"security_group_rules"` + } + err := (r.(SecGroupRulePage)).ExtractInto(&s) + return s.SecGroupRules, err +} + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a security rule. +func (r commonResult) Extract() (*SecGroupRule, error) { + var s struct { + SecGroupRule *SecGroupRule `json:"security_group_rule"` + } + err := r.ExtractInto(&s) + return s.SecGroupRule, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a SecGroupRule. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a SecGroupRule. +type GetResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. 
+type DeleteResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/testing/doc.go new file mode 100644 index 000000000000..df31e6c5c3be --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/testing/doc.go @@ -0,0 +1,2 @@ +// rules unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/testing/requests_test.go new file mode 100644 index 000000000000..968fd04d8fdc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/testing/requests_test.go @@ -0,0 +1,236 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rules": [ + { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + }, + { + "direction": "egress", + "ethertype": "IPv4", + "id": "93aa42e5-80db-4581-9391-3a608bd0e448", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } + ] +} + `) + }) + + count := 0 + + rules.List(fake.ServiceClient(), rules.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := rules.ExtractRules(page) + if err != nil { + t.Errorf("Failed to extract secrules: %v", err) + return false, err + } + + expected := []rules.SecGroupRule{ + { + Direction: "egress", + EtherType: "IPv6", + ID: "3c0e45ff-adaf-4124-b083-bf390e5482ff", + PortRangeMax: 0, + PortRangeMin: 0, + Protocol: "", + RemoteGroupID: "", + RemoteIPPrefix: "", + SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + { + Direction: "egress", + EtherType: "IPv4", + ID: "93aa42e5-80db-4581-9391-3a608bd0e448", + PortRangeMax: 0, + PortRangeMin: 0, + Protocol: "", + RemoteGroupID: "", + RemoteIPPrefix: "", + SecGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + TenantID: "e4f50856753b4dc6afee5fa6b9b6c550", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
th.Mux.HandleFunc("/v2.0/security-group-rules", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "security_group_rule": { + "direction": "ingress", + "port_range_min": 80, + "ethertype": "IPv4", + "port_range_max": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "direction": "ingress", + "ethertype": "IPv4", + "id": "2bc0accf-312e-429a-956e-e4407625eb62", + "port_range_max": 80, + "port_range_min": 80, + "protocol": "tcp", + "remote_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "remote_ip_prefix": null, + "security_group_id": "a7734e61-b545-452d-a3cd-0189cbd9747a", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} + `) + }) + + opts := rules.CreateOpts{ + Direction: "ingress", + PortRangeMin: 80, + EtherType: rules.EtherType4, + PortRangeMax: 80, + Protocol: "tcp", + RemoteGroupID: "85cc3048-abc3-43cc-89b3-377341426ac5", + SecGroupID: "a7734e61-b545-452d-a3cd-0189cbd9747a", + } + _, err := rules.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := rules.Create(fake.ServiceClient(), rules.CreateOpts{Direction: rules.DirIngress}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = rules.Create(fake.ServiceClient(), rules.CreateOpts{Direction: rules.DirIngress, EtherType: rules.EtherType4}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = rules.Create(fake.ServiceClient(), rules.CreateOpts{Direction: rules.DirIngress, EtherType: rules.EtherType4}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + res = rules.Create(fake.ServiceClient(), rules.CreateOpts{Direction: rules.DirIngress, EtherType: rules.EtherType4, SecGroupID: "something", Protocol: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/3c0e45ff-adaf-4124-b083-bf390e5482ff", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "security_group_rule": { + "direction": "egress", + "ethertype": "IPv6", + "id": "3c0e45ff-adaf-4124-b083-bf390e5482ff", + "port_range_max": null, + "port_range_min": null, + "protocol": null, + "remote_group_id": null, + "remote_ip_prefix": null, + "security_group_id": "85cc3048-abc3-43cc-89b3-377341426ac5", + "tenant_id": "e4f50856753b4dc6afee5fa6b9b6c550" + } +} + `) + }) + + sr, err := rules.Get(fake.ServiceClient(), "3c0e45ff-adaf-4124-b083-bf390e5482ff").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, "egress", sr.Direction) + th.AssertEquals(t, "IPv6", sr.EtherType) + th.AssertEquals(t, "3c0e45ff-adaf-4124-b083-bf390e5482ff", sr.ID) + th.AssertEquals(t, 0, sr.PortRangeMax) + th.AssertEquals(t, 0, sr.PortRangeMin) + th.AssertEquals(t, "", sr.Protocol) + th.AssertEquals(t, "", sr.RemoteGroupID) + th.AssertEquals(t, "", sr.RemoteIPPrefix) + 
th.AssertEquals(t, "85cc3048-abc3-43cc-89b3-377341426ac5", sr.SecGroupID) + th.AssertEquals(t, "e4f50856753b4dc6afee5fa6b9b6c550", sr.TenantID) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/security-group-rules/4ec89087-d057-4e2c-911f-60a3b47ee304", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := rules.Delete(fake.ServiceClient(), "4ec89087-d057-4e2c-911f-60a3b47ee304") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go new file mode 100644 index 000000000000..a5ede0e89b91 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/security/rules/urls.go @@ -0,0 +1,13 @@ +package rules + +import "github.com/gophercloud/gophercloud" + +const rootPath = "security-group-rules" + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/doc.go new file mode 100644 index 000000000000..2a9fe63dd4b7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/doc.go @@ -0,0 +1,72 @@ +/* +Package subnetpools provides the ability to retrieve and manage subnetpools through the Neutron API. 
+ +Example to List Subnetpools + + listOpts := subnetpools.ListOpts{ + IPVersion: 6, + } + + allPages, err := subnetpools.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allSubnetpools, err := subnetpools.ExtractSubnetPools(allPages) + if err != nil { + panic(err) + } + + for _, subnetPool := range allSubnetpools { + fmt.Printf("%+v\n", subnetPool) + } + +Example to Get a Subnetpool + + subnetPoolID := "23d5d3f7-9dfa-4f73-b72b-8b0b0063ec55" + subnetPool, err := subnetpools.Get(networkClient, subnetPoolID).Extract() + if err != nil { + panic(err) + } + +Example to Create a new Subnetpool + + subnetPoolName := "private_pool" + subnetPoolPrefixes := []string{ + "10.0.0.0/8", + "172.16.0.0/12", + "192.168.0.0/16", + } + subnetPoolOpts := subnetpools.CreateOpts{ + Name: subnetPoolName, + Prefixes: subnetPoolPrefixes, + } + subnetPool, err := subnetpools.Create(networkClient, subnetPoolOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Subnetpool + + subnetPoolID := "099546ca-788d-41e5-a76d-17d8cd282d3e" + updateOpts := subnetpools.UpdateOpts{ + Prefixes: []string{ + "fdf7:b13d:dead:beef::/64", + }, + MaxPrefixLen: 72, + } + + subnetPool, err := subnetpools.Update(networkClient, subnetPoolID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Subnetpool + + subnetPoolID := "23d5d3f7-9dfa-4f73-b72b-8b0b0063ec55" + err := subnetpools.Delete(networkClient, subnetPoolID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package subnetpools diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/requests.go new file mode 100644 index 000000000000..e7ee96aef64e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/requests.go @@ -0,0 +1,221 @@ +package subnetpools + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToSubnetPoolListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the Neutron API. Filtering is achieved by passing in struct field values +// that map to the subnetpool attributes you want to see returned. +// SortKey allows you to sort by a particular subnetpool attribute. +// SortDir sets the direction, and is either `asc' or `desc'. +// Marker and Limit are used for the pagination. +type ListOpts struct { + ID string `q:"id"` + Name string `q:"name"` + DefaultQuota int `q:"default_quota"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + DefaultPrefixLen int `q:"default_prefixlen"` + MinPrefixLen int `q:"min_prefixlen"` + MaxPrefixLen int `q:"max_prefixlen"` + AddressScopeID string `q:"address_scope_id"` + IPVersion int `q:"ip_version"` + Shared *bool `q:"shared"` + Description string `q:"description"` + IsDefault *bool `q:"is_default"` + RevisionNumber int `q:"revision_number"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToSubnetPoolListQuery formats a ListOpts into a query string. 
+func (opts ListOpts) ToSubnetPoolListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// subnetpools. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only the subnetpools owned by the project +// of the user submitting the request, unless the user has the administrative role. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToSubnetPoolListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return SubnetPoolPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific subnetpool based on its ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSubnetPoolCreateMap() (map[string]interface{}, error) +} + +// CreateOpts specifies parameters of a new subnetpool. +type CreateOpts struct { + // Name is the human-readable name of the subnetpool. + Name string `json:"name"` + + // DefaultQuota is the per-project quota on the prefix space + // that can be allocated from the subnetpool for project subnets. + DefaultQuota int `json:"default_quota,omitempty"` + + // TenantID is the id of the Identity project. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the id of the Identity project. + ProjectID string `json:"project_id,omitempty"` + + // Prefixes is the list of subnet prefixes to assign to the subnetpool. + // Neutron API merges adjacent prefixes and treats them as a single prefix. + // Each subnet prefix must be unique among all subnet prefixes in all subnetpools + // that are associated with the address scope. + Prefixes []string `json:"prefixes"` + + // DefaultPrefixLen is the size of the prefix to allocate when the cidr + // or prefixlen attributes are omitted when you create the subnet. + // Defaults to the MinPrefixLen. + DefaultPrefixLen int `json:"default_prefixlen,omitempty"` + + // MinPrefixLen is the smallest prefix that can be allocated from a subnetpool. + // For IPv4 subnetpools, default is 8. + // For IPv6 subnetpools, default is 64. + MinPrefixLen int `json:"min_prefixlen,omitempty"` + + // MaxPrefixLen is the maximum prefix size that can be allocated from the subnetpool. + // For IPv4 subnetpools, default is 32. + // For IPv6 subnetpools, default is 128. + MaxPrefixLen int `json:"max_prefixlen,omitempty"` + + // AddressScopeID is the Neutron address scope to assign to the subnetpool. + AddressScopeID string `json:"address_scope_id,omitempty"` + + // Shared indicates whether this network is shared across all projects. + Shared bool `json:"shared,omitempty"` + + // Description is the human-readable description for the resource. + Description string `json:"description,omitempty"` + + // IsDefault indicates if the subnetpool is default pool or not. + IsDefault bool `json:"is_default,omitempty"` +} + +// ToSubnetPoolCreateMap constructs a request body from CreateOpts. 
+func (opts CreateOpts) ToSubnetPoolCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "subnetpool") +} + +// Create requests the creation of a new subnetpool on the server. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSubnetPoolCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{201}, + }) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSubnetPoolUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options used to update a subnetpool. +type UpdateOpts struct { + // Name is the human-readable name of the subnetpool. + Name string `json:"name,omitempty"` + + // DefaultQuota is the per-project quota on the prefix space + // that can be allocated from the subnetpool for project subnets. + DefaultQuota *int `json:"default_quota,omitempty"` + + // TenantID is the id of the Identity project. + TenantID string `json:"tenant_id,omitempty"` + + // ProjectID is the id of the Identity project. + ProjectID string `json:"project_id,omitempty"` + + // Prefixes is the list of subnet prefixes to assign to the subnetpool. + // Neutron API merges adjacent prefixes and treats them as a single prefix. + // Each subnet prefix must be unique among all subnet prefixes in all subnetpools + // that are associated with the address scope. + Prefixes []string `json:"prefixes,omitempty"` + + // DefaultPrefixLen is the size of the prefix to allocate when the cidr + // or prefixlen attributes are omitted when you create the subnet. + // Defaults to the MinPrefixLen. + DefaultPrefixLen int `json:"default_prefixlen,omitempty"` + + // MinPrefixLen is the smallest prefix that can be allocated from a subnetpool. + // For IPv4 subnetpools, default is 8. + // For IPv6 subnetpools, default is 64. + MinPrefixLen int `json:"min_prefixlen,omitempty"` + + // MaxPrefixLen is the maximum prefix size that can be allocated from the subnetpool. + // For IPv4 subnetpools, default is 32. + // For IPv6 subnetpools, default is 128. + MaxPrefixLen int `json:"max_prefixlen,omitempty"` + + // AddressScopeID is the Neutron address scope to assign to the subnetpool. + AddressScopeID *string `json:"address_scope_id,omitempty"` + + // Description is the human-readable description for the resource. + Description *string `json:"description,omitempty"` + + // IsDefault indicates if the subnetpool is default pool or not. + IsDefault *bool `json:"is_default,omitempty"` +} + +// ToSubnetPoolUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSubnetPoolUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "subnetpool") +} + +// Update accepts an UpdateOpts struct and updates an existing subnetpool using the +// values provided. +func Update(c *gophercloud.ServiceClient, subnetPoolID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSubnetPoolUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, subnetPoolID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete accepts a unique ID and deletes the subnetpool associated with it. 
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, id), nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/results.go new file mode 100644 index 000000000000..e761eac44d13 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/results.go @@ -0,0 +1,200 @@ +package subnetpools + +import ( + "encoding/json" + "fmt" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a subnetpool resource. +func (r commonResult) Extract() (*SubnetPool, error) { + var s struct { + SubnetPool *SubnetPool `json:"subnetpool"` + } + err := r.ExtractInto(&s) + return s.SubnetPool, err +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a SubnetPool. +type GetResult struct { + commonResult +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a SubnetPool. +type CreateResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a SubnetPool. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// SubnetPool represents a Neutron subnetpool. +// A subnetpool is a pool of addresses from which subnets can be allocated. +type SubnetPool struct { + // ID is the id of the subnetpool. + ID string `json:"id"` + + // Name is the human-readable name of the subnetpool. + Name string `json:"name"` + + // DefaultQuota is the per-project quota on the prefix space + // that can be allocated from the subnetpool for project subnets. + DefaultQuota int `json:"default_quota"` + + // TenantID is the id of the Identity project. + TenantID string `json:"tenant_id"` + + // ProjectID is the id of the Identity project. + ProjectID string `json:"project_id"` + + // CreatedAt is the time at which the subnetpool was created. + CreatedAt time.Time `json:"created_at"` + + // UpdatedAt is the time at which the subnetpool was last updated. + UpdatedAt time.Time `json:"updated_at"` + + // Prefixes is the list of subnet prefixes to assign to the subnetpool. + // Neutron API merges adjacent prefixes and treats them as a single prefix. + // Each subnet prefix must be unique among all subnet prefixes in all subnetpools + // that are associated with the address scope. + Prefixes []string `json:"prefixes"` + + // DefaultPrefixLen is the size of the prefix to allocate when the cidr + // or prefixlen attributes are omitted when you create the subnet. + // Defaults to the MinPrefixLen. + DefaultPrefixLen int `json:"-"` + + // MinPrefixLen is the smallest prefix that can be allocated from a subnetpool. + // For IPv4 subnetpools, default is 8. + // For IPv6 subnetpools, default is 64. + MinPrefixLen int `json:"-"` + + // MaxPrefixLen is the maximum prefix size that can be allocated from the subnetpool. + // For IPv4 subnetpools, default is 32. + // For IPv6 subnetpools, default is 128. 
+	MaxPrefixLen int `json:"-"`
+
+	// AddressScopeID is the Neutron address scope to assign to the subnetpool.
+	AddressScopeID string `json:"address_scope_id"`
+
+	// IPversion is the IP protocol version.
+	// Valid value is 4 or 6. Default is 4.
+	IPversion int `json:"ip_version"`
+
+	// Shared indicates whether this subnetpool is shared across all projects.
+	Shared bool `json:"shared"`
+
+	// Description is the human-readable description for the resource.
+	Description string `json:"description"`
+
+	// IsDefault indicates whether the subnetpool is the default pool.
+	IsDefault bool `json:"is_default"`
+
+	// RevisionNumber is the revision number of the subnetpool.
+	RevisionNumber int `json:"revision_number"`
+}
+
+func (r *SubnetPool) UnmarshalJSON(b []byte) error {
+	type tmp SubnetPool
+	var s struct {
+		tmp
+		DefaultPrefixLen interface{} `json:"default_prefixlen"`
+		MinPrefixLen     interface{} `json:"min_prefixlen"`
+		MaxPrefixLen     interface{} `json:"max_prefixlen"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = SubnetPool(s.tmp)
+
+	switch t := s.DefaultPrefixLen.(type) {
+	case string:
+		if r.DefaultPrefixLen, err = strconv.Atoi(t); err != nil {
+			return err
+		}
+	case float64:
+		r.DefaultPrefixLen = int(t)
+	default:
+		return fmt.Errorf("DefaultPrefixLen has unexpected type: %T", t)
+	}
+
+	switch t := s.MinPrefixLen.(type) {
+	case string:
+		if r.MinPrefixLen, err = strconv.Atoi(t); err != nil {
+			return err
+		}
+	case float64:
+		r.MinPrefixLen = int(t)
+	default:
+		return fmt.Errorf("MinPrefixLen has unexpected type: %T", t)
+	}
+
+	switch t := s.MaxPrefixLen.(type) {
+	case string:
+		if r.MaxPrefixLen, err = strconv.Atoi(t); err != nil {
+			return err
+		}
+	case float64:
+		r.MaxPrefixLen = int(t)
+	default:
+		return fmt.Errorf("MaxPrefixLen has unexpected type: %T", t)
+	}
+
+	return nil
+}
+
+// SubnetPoolPage stores a single page of SubnetPools from a List() API call.
+type SubnetPoolPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of subnetpools has reached
+// the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r SubnetPoolPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"subnetpools_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty determines whether or not a SubnetPoolPage is empty.
+func (r SubnetPoolPage) IsEmpty() (bool, error) {
+	subnetpools, err := ExtractSubnetPools(r)
+	return len(subnetpools) == 0, err
+}
+
+// ExtractSubnetPools interprets the results of a single page from a List() API call,
+// producing a slice of SubnetPool structs.
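The custom UnmarshalJSON above exists because Neutron returns default_prefixlen, min_prefixlen, and max_prefixlen either as JSON numbers or as quoted strings (both forms appear in the test fixtures later in this change), so the three fields are decoded through an interface{} and a type switch. A self-contained sketch of the same string-or-number pattern; the flexInt type is illustrative and not part of the vendored package:

	package main

	import (
		"encoding/json"
		"fmt"
		"strconv"
	)

	// flexInt accepts either a JSON number or a numeric string.
	type flexInt int

	func (f *flexInt) UnmarshalJSON(b []byte) error {
		var v interface{}
		if err := json.Unmarshal(b, &v); err != nil {
			return err
		}
		switch t := v.(type) {
		case string:
			n, err := strconv.Atoi(t)
			if err != nil {
				return err
			}
			*f = flexInt(n)
		case float64: // encoding/json decodes bare JSON numbers as float64
			*f = flexInt(t)
		default:
			return fmt.Errorf("unexpected type %T for prefix length", t)
		}
		return nil
	}

	func main() {
		var a, b flexInt
		_ = json.Unmarshal([]byte(`"32"`), &a) // string form, as in the list fixture
		_ = json.Unmarshal([]byte(`32`), &b)   // numeric form, as in the update fixture
		fmt.Println(a, b)                      // 32 32
	}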
+func ExtractSubnetPools(r pagination.Page) ([]SubnetPool, error) { + var s struct { + SubnetPools []SubnetPool `json:"subnetpools"` + } + err := (r.(SubnetPoolPage)).ExtractInto(&s) + return s.SubnetPools, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/doc.go new file mode 100644 index 000000000000..7787611308d9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/doc.go @@ -0,0 +1,2 @@ +// subnetpools unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/fixtures.go new file mode 100644 index 000000000000..274202890524 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/fixtures.go @@ -0,0 +1,259 @@ +package testing + +import ( + "time" + + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools" +) + +const SubnetPoolsListResult = ` +{ + "subnetpools": [ + { + "address_scope_id": null, + "created_at": "2017-12-28T07:21:41Z", + "default_prefixlen": "8", + "default_quota": null, + "description": "IPv4", + "id": "d43a57fe-3390-4608-b437-b1307b0adb40", + "ip_version": 4, + "is_default": false, + "max_prefixlen": "32", + "min_prefixlen": "8", + "name": "MyPoolIpv4", + "prefixes": [ + "10.10.10.0/24", + "10.11.11.0/24" + ], + "project_id": "1e2b9857295a4a3e841809ef492812c5", + "revision_number": 1, + "shared": false, + "tenant_id": "1e2b9857295a4a3e841809ef492812c5", + "updated_at": "2017-12-28T07:21:41Z" + }, + { + "address_scope_id": "0bc38e22-be49-4e67-969e-fec3f36508bd", + "created_at": "2017-12-28T07:21:34Z", + "default_prefixlen": "64", + "default_quota": null, + "description": "IPv6", + "id": "832cb7f3-59fe-40cf-8f64-8350ffc03272", + "ip_version": 6, + "is_default": true, + "max_prefixlen": "128", + "min_prefixlen": "64", + "name": "MyPoolIpv6", + "prefixes": [ + "fdf7:b13d:dead:beef::/64", + "fd65:86cc:a334:39b7::/64" + ], + "project_id": "1e2b9857295a4a3e841809ef492812c5", + "revision_number": 1, + "shared": false, + "tenant_id": "1e2b9857295a4a3e841809ef492812c5", + "updated_at": "2017-12-28T07:21:34Z" + }, + { + "address_scope_id": null, + "created_at": "2017-12-28T07:21:27Z", + "default_prefixlen": "64", + "default_quota": 4, + "description": "PublicPool", + "id": "2fe18ae6-58c2-4a85-8bfb-566d6426749b", + "ip_version": 6, + "is_default": false, + "max_prefixlen": "128", + "min_prefixlen": "64", + "name": "PublicIPv6", + "prefixes": [ + "2001:db8::a3/64" + ], + "project_id": "ceb366d50ad54fe39717df3af60f9945", + "revision_number": 1, + "shared": true, + "tenant_id": "ceb366d50ad54fe39717df3af60f9945", + "updated_at": "2017-12-28T07:21:27Z" + } + ] +} +` + +var SubnetPool1 = subnetpools.SubnetPool{ + AddressScopeID: "", + CreatedAt: time.Date(2017, 12, 28, 7, 21, 41, 0, time.UTC), + DefaultPrefixLen: 8, + DefaultQuota: 0, + Description: "IPv4", + ID: "d43a57fe-3390-4608-b437-b1307b0adb40", + IPversion: 4, + IsDefault: false, + MaxPrefixLen: 32, + MinPrefixLen: 8, + Name: "MyPoolIpv4", + Prefixes: []string{ + "10.10.10.0/24", + "10.11.11.0/24", + }, + ProjectID: "1e2b9857295a4a3e841809ef492812c5", + TenantID: "1e2b9857295a4a3e841809ef492812c5", + RevisionNumber: 1, + 
Shared: false, + UpdatedAt: time.Date(2017, 12, 28, 7, 21, 41, 0, time.UTC), +} + +var SubnetPool2 = subnetpools.SubnetPool{ + AddressScopeID: "0bc38e22-be49-4e67-969e-fec3f36508bd", + CreatedAt: time.Date(2017, 12, 28, 7, 21, 34, 0, time.UTC), + DefaultPrefixLen: 64, + DefaultQuota: 0, + Description: "IPv6", + ID: "832cb7f3-59fe-40cf-8f64-8350ffc03272", + IPversion: 6, + IsDefault: true, + MaxPrefixLen: 128, + MinPrefixLen: 64, + Name: "MyPoolIpv6", + Prefixes: []string{ + "fdf7:b13d:dead:beef::/64", + "fd65:86cc:a334:39b7::/64", + }, + ProjectID: "1e2b9857295a4a3e841809ef492812c5", + TenantID: "1e2b9857295a4a3e841809ef492812c5", + RevisionNumber: 1, + Shared: false, + UpdatedAt: time.Date(2017, 12, 28, 7, 21, 34, 0, time.UTC), +} + +var SubnetPool3 = subnetpools.SubnetPool{ + AddressScopeID: "", + CreatedAt: time.Date(2017, 12, 28, 7, 21, 27, 0, time.UTC), + DefaultPrefixLen: 64, + DefaultQuota: 4, + Description: "PublicPool", + ID: "2fe18ae6-58c2-4a85-8bfb-566d6426749b", + IPversion: 6, + IsDefault: false, + MaxPrefixLen: 128, + MinPrefixLen: 64, + Name: "PublicIPv6", + Prefixes: []string{ + "2001:db8::a3/64", + }, + ProjectID: "ceb366d50ad54fe39717df3af60f9945", + TenantID: "ceb366d50ad54fe39717df3af60f9945", + RevisionNumber: 1, + Shared: true, + UpdatedAt: time.Date(2017, 12, 28, 7, 21, 27, 0, time.UTC), +} + +const SubnetPoolGetResult = ` +{ + "subnetpool": { + "min_prefixlen": "64", + "address_scope_id": null, + "default_prefixlen": "64", + "id": "0a738452-8057-4ad3-89c2-92f6a74afa76", + "max_prefixlen": "128", + "name": "my-ipv6-pool", + "default_quota": 2, + "is_default": true, + "project_id": "1e2b9857295a4a3e841809ef492812c5", + "tenant_id": "1e2b9857295a4a3e841809ef492812c5", + "created_at": "2018-01-01T00:00:01Z", + "prefixes": [ + "2001:db8::a3/64" + ], + "updated_at": "2018-01-01T00:10:10Z", + "ip_version": 6, + "shared": false, + "description": "ipv6 prefixes", + "revision_number": 2 + } +} +` + +const SubnetPoolCreateRequest = ` +{ + "subnetpool": { + "name": "my_ipv4_pool", + "prefixes": [ + "10.10.0.0/16", + "10.11.11.0/24" + ], + "address_scope_id": "3d4e2e2a-552b-42ad-a16d-820bbf3edaf3", + "min_prefixlen": 25, + "max_prefixlen": 30, + "description": "ipv4 prefixes" + } +} +` + +const SubnetPoolCreateResult = ` +{ + "subnetpool": { + "address_scope_id": "3d4e2e2a-552b-42ad-a16d-820bbf3edaf3", + "created_at": "2018-01-01T00:00:15Z", + "default_prefixlen": "25", + "default_quota": null, + "description": "ipv4 prefixes", + "id": "55b5999c-c2fe-42cd-bce0-961a551b80f5", + "ip_version": 4, + "is_default": false, + "max_prefixlen": "30", + "min_prefixlen": "25", + "name": "my_ipv4_pool", + "prefixes": [ + "10.10.0.0/16", + "10.11.11.0/24" + ], + "project_id": "1e2b9857295a4a3e841809ef492812c5", + "revision_number": 1, + "shared": false, + "tenant_id": "1e2b9857295a4a3e841809ef492812c5", + "updated_at": "2018-01-01T00:00:15Z" + } +} +` + +const SubnetPoolUpdateRequest = ` +{ + "subnetpool": { + "name": "new_subnetpool_name", + "prefixes": [ + "10.11.12.0/24", + "10.24.0.0/16" + ], + "max_prefixlen": 16, + "address_scope_id": "", + "default_quota": 0, + "description": "" + } +} +` + +const SubnetPoolUpdateResponse = ` +{ + "subnetpool": { + "address_scope_id": null, + "created_at": "2018-01-03T07:21:34Z", + "default_prefixlen": 8, + "default_quota": null, + "description": null, + "id": "099546ca-788d-41e5-a76d-17d8cd282d3e", + "ip_version": 4, + "is_default": true, + "max_prefixlen": 16, + "min_prefixlen": 8, + "name": "new_subnetpool_name", + "prefixes": [ + "10.8.0.0/16", + 
"10.11.12.0/24", + "10.24.0.0/16" + ], + "revision_number": 2, + "shared": false, + "tenant_id": "1e2b9857295a4a3e841809ef492812c5", + "updated_at": "2018-01-05T09:56:56Z" + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/requests_test.go new file mode 100644 index 000000000000..3d138d37b337 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/testing/requests_test.go @@ -0,0 +1,192 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnetpools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SubnetPoolsListResult) + }) + + count := 0 + + subnetpools.List(fake.ServiceClient(), subnetpools.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := subnetpools.ExtractSubnetPools(page) + if err != nil { + t.Errorf("Failed to extract subnetpools: %v", err) + return false, nil + } + + expected := []subnetpools.SubnetPool{ + SubnetPool1, + SubnetPool2, + SubnetPool3, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnetpools/0a738452-8057-4ad3-89c2-92f6a74afa76", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SubnetPoolGetResult) + }) + + s, err := subnetpools.Get(fake.ServiceClient(), "0a738452-8057-4ad3-89c2-92f6a74afa76").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.ID, "0a738452-8057-4ad3-89c2-92f6a74afa76") + th.AssertEquals(t, s.Name, "my-ipv6-pool") + th.AssertEquals(t, s.DefaultQuota, 2) + th.AssertEquals(t, s.TenantID, "1e2b9857295a4a3e841809ef492812c5") + th.AssertEquals(t, s.ProjectID, "1e2b9857295a4a3e841809ef492812c5") + th.AssertEquals(t, s.CreatedAt, time.Date(2018, 1, 1, 0, 0, 1, 0, time.UTC)) + th.AssertEquals(t, s.UpdatedAt, time.Date(2018, 1, 1, 0, 10, 10, 0, time.UTC)) + th.AssertDeepEquals(t, s.Prefixes, []string{ + "2001:db8::a3/64", + }) + th.AssertEquals(t, s.DefaultPrefixLen, 64) + th.AssertEquals(t, s.MinPrefixLen, 64) + th.AssertEquals(t, s.MaxPrefixLen, 128) + th.AssertEquals(t, s.AddressScopeID, "") + th.AssertEquals(t, s.IPversion, 6) + th.AssertEquals(t, s.Shared, false) + th.AssertEquals(t, s.Description, "ipv6 prefixes") + th.AssertEquals(t, s.IsDefault, true) + th.AssertEquals(t, s.RevisionNumber, 2) +} +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnetpools", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, 
"X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetPoolCreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetPoolCreateResult) + }) + + opts := subnetpools.CreateOpts{ + Name: "my_ipv4_pool", + Prefixes: []string{ + "10.10.0.0/16", + "10.11.11.0/24", + }, + MinPrefixLen: 25, + MaxPrefixLen: 30, + AddressScopeID: "3d4e2e2a-552b-42ad-a16d-820bbf3edaf3", + Description: "ipv4 prefixes", + } + s, err := subnetpools.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_ipv4_pool") + th.AssertDeepEquals(t, s.Prefixes, []string{ + "10.10.0.0/16", + "10.11.11.0/24", + }) + th.AssertEquals(t, s.MinPrefixLen, 25) + th.AssertEquals(t, s.MaxPrefixLen, 30) + th.AssertEquals(t, s.AddressScopeID, "3d4e2e2a-552b-42ad-a16d-820bbf3edaf3") + th.AssertEquals(t, s.Description, "ipv4 prefixes") +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnetpools/099546ca-788d-41e5-a76d-17d8cd282d3e", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetPoolUpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SubnetPoolUpdateResponse) + }) + + nullString := "" + nullInt := 0 + updateOpts := subnetpools.UpdateOpts{ + Name: "new_subnetpool_name", + Prefixes: []string{ + "10.11.12.0/24", + "10.24.0.0/16", + }, + MaxPrefixLen: 16, + AddressScopeID: &nullString, + DefaultQuota: &nullInt, + Description: &nullString, + } + n, err := subnetpools.Update(fake.ServiceClient(), "099546ca-788d-41e5-a76d-17d8cd282d3e", updateOpts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "new_subnetpool_name") + th.AssertDeepEquals(t, n.Prefixes, []string{ + "10.8.0.0/16", + "10.11.12.0/24", + "10.24.0.0/16", + }) + th.AssertEquals(t, n.MaxPrefixLen, 16) + th.AssertEquals(t, n.ID, "099546ca-788d-41e5-a76d-17d8cd282d3e") + th.AssertEquals(t, n.AddressScopeID, "") + th.AssertEquals(t, n.DefaultQuota, 0) + th.AssertEquals(t, n.Description, "") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnetpools/099546ca-788d-41e5-a76d-17d8cd282d3e", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := subnetpools.Delete(fake.ServiceClient(), "099546ca-788d-41e5-a76d-17d8cd282d3e") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/urls.go new file mode 100644 index 000000000000..a05062c96d6f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/subnetpools/urls.go @@ -0,0 +1,33 @@ +package subnetpools + +import "github.com/gophercloud/gophercloud" + +const resourcePath = "subnetpools" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(resourcePath, id) +} + +func rootURL(c *gophercloud.ServiceClient) 
string { + return c.ServiceURL(resourcePath) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/testing/delegate_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/testing/delegate_test.go new file mode 100644 index 000000000000..20a85f95b2ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/testing/delegate_test.go @@ -0,0 +1,106 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + common "github.com/gophercloud/gophercloud/openstack/common/extensions" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/extensions", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, ` +{ + "extensions": [ + { + "updated": "2013-01-20T00:00:00-00:00", + "name": "Neutron Service Type Management", + "links": [], + "namespace": "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + "alias": "service-type", + "description": "API for retrieving service providers for Neutron advanced services" + } + ] +} + `) + }) + + count := 0 + + extensions.List(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := extensions.ExtractExtensions(page) + if err != nil { + t.Errorf("Failed to extract extensions: %v", err) + } + + expected := []extensions.Extension{ + { + Extension: common.Extension{ + Updated: "2013-01-20T00:00:00-00:00", + Name: "Neutron Service Type Management", + Links: []interface{}{}, + Namespace: "http://docs.openstack.org/ext/neutron/service-type/api/v1.0", + Alias: "service-type", + Description: "API for retrieving service providers for Neutron advanced services", + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/extensions/agent", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "extension": { + "updated": "2013-02-03T10:00:00-00:00", + "name": "agent", + "links": [], + "namespace": "http://docs.openstack.org/ext/agent/api/v2.0", + "alias": "agent", + "description": "The agent management extension." 
+ } +} + `) + }) + + ext, err := extensions.Get(fake.ServiceClient(), "agent").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, ext.Updated, "2013-02-03T10:00:00-00:00") + th.AssertEquals(t, ext.Name, "agent") + th.AssertEquals(t, ext.Namespace, "http://docs.openstack.org/ext/agent/api/v2.0") + th.AssertEquals(t, ext.Alias, "agent") + th.AssertEquals(t, ext.Description, "The agent management extension.") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/testing/doc.go new file mode 100644 index 000000000000..3c5d45926323 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/testing/doc.go @@ -0,0 +1,2 @@ +// extensions unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/doc.go new file mode 100644 index 000000000000..5f49bd1da467 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/doc.go @@ -0,0 +1,58 @@ +/* +Package endpointgroups allows management of endpoint groups in the Openstack Network Service + +Example to create an Endpoint Group + + createOpts := endpointgroups.CreateOpts{ + Name: groupName, + Type: endpointgroups.TypeCIDR, + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + } + group, err := endpointgroups.Create(client, createOpts).Extract() + if err != nil { + return group, err + } + +Example to retrieve an Endpoint Group + + group, err := endpointgroups.Get(client, "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a").Extract() + if err != nil { + panic(err) + } + +Example to Delete an Endpoint Group + + err := endpointgroups.Delete(client, "5291b189-fd84-46e5-84bd-78f40c05d69c").ExtractErr() + if err != nil { + panic(err) + } + +Example to List Endpoint groups + + allPages, err := endpointgroups.List(client, nil).AllPages() + if err != nil { + panic(err) + } + + allGroups, err := endpointgroups.ExtractEndpointGroups(allPages) + if err != nil { + panic(err) + } + +Example to Update an endpoint group + + name := "updatedname" + description := "updated description" + updateOpts := endpointgroups.UpdateOpts{ + Name: &name, + Description: &description, + } + updatedPolicy, err := endpointgroups.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract() + if err != nil { + panic(err) + } +*/ +package endpointgroups diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/requests.go new file mode 100644 index 000000000000..c12d0a800496 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/requests.go @@ -0,0 +1,144 @@ +package endpointgroups + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type EndpointType string + +const ( + TypeSubnet EndpointType = "subnet" + TypeCIDR EndpointType = "cidr" + TypeVLAN EndpointType = "vlan" + TypeNetwork EndpointType = "network" + TypeRouter EndpointType = "router" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. 
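The EndpointType constants above enumerate what an endpoint group may contain; in Neutron VPNaaS the local endpoint group of a site connection typically holds subnet IDs (TypeSubnet) while the peer group holds remote CIDRs (TypeCIDR), mirroring the doc.go example earlier. A hedged sketch of building both with the CreateOpts defined just below; the client, names, and subnet IDs are illustrative placeholders:

	localOpts := endpointgroups.CreateOpts{
		Name:      "local-subnets",
		Type:      endpointgroups.TypeSubnet,
		Endpoints: []string{"subnet-id-1", "subnet-id-2"}, // placeholder subnet IDs
	}
	peerOpts := endpointgroups.CreateOpts{
		Name:      "peer-cidrs",
		Type:      endpointgroups.TypeCIDR,
		Endpoints: []string{"10.2.0.0/24", "10.3.0.0/24"},
	}
	localGroup, err := endpointgroups.Create(client, localOpts).Extract()
	if err != nil {
		// handle error
	}
	peerGroup, err := endpointgroups.Create(client, peerOpts).Extract()
	if err != nil {
		// handle error
	}
	_, _ = localGroup, peerGroup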
+type CreateOptsBuilder interface { + ToEndpointGroupCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains all the values needed to create a new endpoint group +type CreateOpts struct { + // TenantID specifies a tenant to own the endpoint group. The caller must have + // an admin role in order to set this. Otherwise, this field is left unset + // and the caller will be the owner. + TenantID string `json:"tenant_id,omitempty"` + + // Description is the human readable description of the endpoint group. + Description string `json:"description,omitempty"` + + // Name is the human readable name of the endpoint group. + Name string `json:"name,omitempty"` + + // The type of the endpoints in the group. + // A valid value is subnet, cidr, network, router, or vlan. + Type EndpointType `json:"type,omitempty"` + + // List of endpoints of the same type, for the endpoint group. + // The values will depend on the type. + Endpoints []string `json:"endpoints"` +} + +// ToEndpointGroupCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToEndpointGroupCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "endpoint_group") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// endpoint group. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToEndpointGroupCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular endpoint group based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToEndpointGroupListQuery() (string, error) +} + +// ListOpts allows the filtering of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the Endpoint group attributes you want to see returned. +type ListOpts struct { + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Description string `q:"description"` + Name string `q:"name"` + Type string `q:"type"` +} + +// ToEndpointGroupListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToEndpointGroupListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// Endpoint groups. It accepts a ListOpts struct, which allows you to filter +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToEndpointGroupListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return EndpointGroupPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Delete will permanently delete a particular endpoint group based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
+type UpdateOptsBuilder interface {
+	ToEndpointGroupUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating an endpoint group.
+type UpdateOpts struct {
+	Description *string `json:"description,omitempty"`
+	Name        *string `json:"name,omitempty"`
+}
+
+// ToEndpointGroupUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToEndpointGroupUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "endpoint_group")
+}
+
+// Update allows endpoint groups to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToEndpointGroupUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/results.go
new file mode 100644
index 000000000000..822b70002c52
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/results.go
@@ -0,0 +1,104 @@
+package endpointgroups
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// EndpointGroup is an endpoint group.
+type EndpointGroup struct {
+	// TenantID specifies a tenant to own the endpoint group.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID specifies the project that owns the endpoint group.
+	ProjectID string `json:"project_id"`
+
+	// Description is the human readable description of the endpoint group.
+	Description string `json:"description"`
+
+	// Name is the human readable name of the endpoint group.
+	Name string `json:"name"`
+
+	// Type is the type of the endpoints in the group.
+	Type string `json:"type"`
+
+	// Endpoints is a list of endpoints.
+	Endpoints []string `json:"endpoints"`
+
+	// ID is the id of the endpoint group.
+	ID string `json:"id"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an endpoint group.
+func (r commonResult) Extract() (*EndpointGroup, error) {
+	var s struct {
+		Service *EndpointGroup `json:"endpoint_group"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Service, err
+}
+
+// EndpointGroupPage is the page returned by a pager when traversing over a
+// collection of endpoint groups.
+type EndpointGroupPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of Endpoint groups has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r EndpointGroupPage) NextPageURL() (string, error) {
+	var s struct {
+		Links []gophercloud.Link `json:"endpoint_groups_links"`
+	}
+	err := r.ExtractInto(&s)
+	if err != nil {
+		return "", err
+	}
+	return gophercloud.ExtractNextURL(s.Links)
+}
+
+// IsEmpty checks whether an EndpointGroupPage struct is empty.
+func (r EndpointGroupPage) IsEmpty() (bool, error) {
+	is, err := ExtractEndpointGroups(r)
+	return len(is) == 0, err
+}
+
+// ExtractEndpointGroups accepts a Page struct, specifically an EndpointGroupPage struct,
+// and extracts the elements into a slice of Endpoint group structs. In other words,
+// a generic collection is mapped into a relevant slice.
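ExtractEndpointGroups, documented above and defined immediately below, converts each page returned by List into a typed slice. A hedged paging sketch, assuming an authenticated *gophercloud.ServiceClient named client and the usual fmt/pagination imports, in the snippet style of the package's doc.go:

	err := endpointgroups.List(client, endpointgroups.ListOpts{}).EachPage(
		func(page pagination.Page) (bool, error) {
			groups, err := endpointgroups.ExtractEndpointGroups(page)
			if err != nil {
				return false, err
			}
			for _, g := range groups {
				fmt.Println(g.ID, g.Name, g.Type, g.Endpoints)
			}
			return true, nil // continue to the next page, if any
		})
	if err != nil {
		// handle error
	}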
+func ExtractEndpointGroups(r pagination.Page) ([]EndpointGroup, error) { + var s struct { + EndpointGroups []EndpointGroup `json:"endpoint_groups"` + } + err := (r.(EndpointGroupPage)).ExtractInto(&s) + return s.EndpointGroups, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as an endpoint group. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as an EndpointGroup. +type GetResult struct { + commonResult +} + +// DeleteResult represents the results of a Delete operation. Call its ExtractErr method +// to determine whether the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. Call its Extract method +// to interpret it as an EndpointGroup. +type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/testing/requests_test.go new file mode 100644 index 000000000000..7feac37f7841 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/testing/requests_test.go @@ -0,0 +1,265 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/endpoint-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "endpoint_group": { + "endpoints": [ + "10.2.0.0/24", + "10.3.0.0/24" + ], + "type": "cidr", + "name": "peers" + } +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "endpoint_group": { + "description": "", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "endpoints": [ + "10.2.0.0/24", + "10.3.0.0/24" + ], + "type": "cidr", + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "peers" + } +} + `) + }) + + options := endpointgroups.CreateOpts{ + Name: "peers", + Type: endpointgroups.TypeCIDR, + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + } + actual, err := endpointgroups.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + expected := endpointgroups.EndpointGroup{ + Name: "peers", + TenantID: "4ad57e7ce0b24fca8f12b9834d91079d", + ProjectID: "4ad57e7ce0b24fca8f12b9834d91079d", + ID: "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + Description: "", + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + Type: "cidr", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/endpoint-groups/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + 
th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "endpoint_group": { + "description": "", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "endpoints": [ + "10.2.0.0/24", + "10.3.0.0/24" + ], + "type": "cidr", + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "peers" + } +} + `) + }) + + actual, err := endpointgroups.Get(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828").Extract() + th.AssertNoErr(t, err) + expected := endpointgroups.EndpointGroup{ + Name: "peers", + TenantID: "4ad57e7ce0b24fca8f12b9834d91079d", + ProjectID: "4ad57e7ce0b24fca8f12b9834d91079d", + ID: "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + Description: "", + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + Type: "cidr", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/endpoint-groups", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "endpoint_groups": [ + { + "description": "", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "endpoints": [ + "10.2.0.0/24", + "10.3.0.0/24" + ], + "type": "cidr", + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "peers" + } + ] +} + `) + }) + + count := 0 + + endpointgroups.List(fake.ServiceClient(), endpointgroups.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := endpointgroups.ExtractEndpointGroups(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + expected := []endpointgroups.EndpointGroup{ + { + Name: "peers", + TenantID: "4ad57e7ce0b24fca8f12b9834d91079d", + ProjectID: "4ad57e7ce0b24fca8f12b9834d91079d", + ID: "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + Description: "", + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + Type: "cidr", + }, + } + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/endpoint-groups/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := endpointgroups.Delete(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/endpoint-groups/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "endpoint_group": { + "description": "updated description", + "name": "updatedname" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "endpoint_group": { + 
"description": "updated description", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "endpoints": [ + "10.2.0.0/24", + "10.3.0.0/24" + ], + "type": "cidr", + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "updatedname" + } +} +`) + }) + + updatedName := "updatedname" + updatedDescription := "updated description" + options := endpointgroups.UpdateOpts{ + Name: &updatedName, + Description: &updatedDescription, + } + + actual, err := endpointgroups.Update(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828", options).Extract() + th.AssertNoErr(t, err) + expected := endpointgroups.EndpointGroup{ + Name: "updatedname", + TenantID: "4ad57e7ce0b24fca8f12b9834d91079d", + ProjectID: "4ad57e7ce0b24fca8f12b9834d91079d", + ID: "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + Description: "updated description", + Endpoints: []string{ + "10.2.0.0/24", + "10.3.0.0/24", + }, + Type: "cidr", + } + th.AssertDeepEquals(t, expected, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/urls.go new file mode 100644 index 000000000000..9e83563ce18e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/endpointgroups/urls.go @@ -0,0 +1,16 @@ +package endpointgroups + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "vpn" + resourcePath = "endpoint-groups" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/doc.go new file mode 100644 index 000000000000..ee44279afa70 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/doc.go @@ -0,0 +1,64 @@ +/* +Package ikepolicies allows management and retrieval of IKE policies in the +OpenStack Networking Service. 
+
+
+Example to Create an IKE policy
+
+	createOpts := ikepolicies.CreateOpts{
+		Name:                "ikepolicy1",
+		Description:         "Description of ikepolicy1",
+		EncryptionAlgorithm: ikepolicies.EncryptionAlgorithm3DES,
+		PFS:                 ikepolicies.PFSGroup5,
+	}
+
+	policy, err := ikepolicies.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific IKE policy by ID
+
+	policy, err := ikepolicies.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+
+Example to Delete a Policy
+
+	err := ikepolicies.Delete(client, "5291b189-fd84-46e5-84bd-78f40c05d69c").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an IKE policy
+
+	name := "updatedname"
+	description := "updated policy"
+	updateOpts := ikepolicies.UpdateOpts{
+		Name:        &name,
+		Description: &description,
+		Lifetime: &ikepolicies.LifetimeUpdateOpts{
+			Value: 7000,
+		},
+	}
+	updatedPolicy, err := ikepolicies.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+
+Example to List IKE policies
+
+	allPages, err := ikepolicies.List(client, nil).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := ikepolicies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package ikepolicies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/requests.go
new file mode 100644
index 000000000000..6b084ff62428
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/requests.go
@@ -0,0 +1,209 @@
+package ikepolicies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type AuthAlgorithm string
+type EncryptionAlgorithm string
+type PFS string
+type Unit string
+type IKEVersion string
+type Phase1NegotiationMode string
+
+const (
+	AuthAlgorithmSHA1         AuthAlgorithm         = "sha1"
+	AuthAlgorithmSHA256       AuthAlgorithm         = "sha256"
+	AuthAlgorithmSHA384       AuthAlgorithm         = "sha384"
+	AuthAlgorithmSHA512       AuthAlgorithm         = "sha512"
+	EncryptionAlgorithm3DES   EncryptionAlgorithm   = "3des"
+	EncryptionAlgorithmAES128 EncryptionAlgorithm   = "aes-128"
+	EncryptionAlgorithmAES256 EncryptionAlgorithm   = "aes-256"
+	EncryptionAlgorithmAES192 EncryptionAlgorithm   = "aes-192"
+	UnitSeconds               Unit                  = "seconds"
+	UnitKilobytes             Unit                  = "kilobytes"
+	PFSGroup2                 PFS                   = "group2"
+	PFSGroup5                 PFS                   = "group5"
+	PFSGroup14                PFS                   = "group14"
+	IKEVersionv1              IKEVersion            = "v1"
+	IKEVersionv2              IKEVersion            = "v2"
+	Phase1NegotiationModeMain Phase1NegotiationMode = "main"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToPolicyCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new IKE policy.
+type CreateOpts struct {
+	// TenantID specifies a tenant to own the IKE policy. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// Description is the human readable description of the policy.
+	Description string `json:"description,omitempty"`
+
+	// Name is the human readable name of the policy.
+	// Does not have to be unique.
+ Name string `json:"name,omitempty"` + + // AuthAlgorithm is the authentication hash algorithm. + // Valid values are sha1, sha256, sha384, sha512. + // The default is sha1. + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + + // EncryptionAlgorithm is the encryption algorithm. + // A valid value is 3des, aes-128, aes-192, aes-256, and so on. + // Default is aes-128. + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + + // PFS is the Perfect forward secrecy mode. + // A valid value is Group2, Group5, Group14, and so on. + // Default is Group5. + PFS PFS `json:"pfs,omitempty"` + + // The IKE mode. + // A valid value is main, which is the default. + Phase1NegotiationMode Phase1NegotiationMode `json:"phase1_negotiation_mode,omitempty"` + + // The IKE version. + // A valid value is v1 or v2. + // Default is v1. + IKEVersion IKEVersion `json:"ike_version,omitempty"` + + //Lifetime is the lifetime of the security association + Lifetime *LifetimeCreateOpts `json:"lifetime,omitempty"` +} + +// The lifetime consists of a unit and integer value +// You can omit either the unit or value portion of the lifetime +type LifetimeCreateOpts struct { + // Units is the units for the lifetime of the security association + // Default unit is seconds + Units Unit `json:"units,omitempty"` + + // The lifetime value. + // Must be a positive integer. + // Default value is 3600. + Value int `json:"value,omitempty"` +} + +// ToPolicyCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ikepolicy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// IKE policy +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Get retrieves a particular IKE policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// Delete will permanently delete a particular IKE policy based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the IKE policy attributes you want to see returned. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + ProjectID string `q:"project_id"` + AuthAlgorithm string `q:"auth_algorithm"` + EncapsulationMode string `q:"encapsulation_mode"` + EncryptionAlgorithm string `q:"encryption_algorithm"` + PFS string `q:"pfs"` + Phase1NegotiationMode string `q:"phase_1_negotiation_mode"` + IKEVersion string `q:"ike_version"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// IKE policies. 
It accepts a ListOpts struct, which allows you to filter +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +type LifetimeUpdateOpts struct { + Units Unit `json:"units,omitempty"` + Value int `json:"value,omitempty"` +} + +// UpdateOpts contains the values used when updating an IKE policy +type UpdateOpts struct { + Description *string `json:"description,omitempty"` + Name *string `json:"name,omitempty"` + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + PFS PFS `json:"pfs,omitempty"` + Lifetime *LifetimeUpdateOpts `json:"lifetime,omitempty"` + Phase1NegotiationMode Phase1NegotiationMode `json:"phase_1_negotiation_mode,omitempty"` + IKEVersion IKEVersion `json:"ike_version,omitempty"` +} + +// ToPolicyUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ikepolicy") +} + +// Update allows IKE policies to be updated. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPolicyUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/results.go new file mode 100644 index 000000000000..b825f5754f68 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/results.go @@ -0,0 +1,125 @@ +package ikepolicies + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Policy is an IKE Policy +type Policy struct { + // TenantID is the ID of the project + TenantID string `json:"tenant_id"` + + // ProjectID is the ID of the project + ProjectID string `json:"project_id"` + + // Description is the human readable description of the policy + Description string `json:"description"` + + // Name is the human readable name of the policy + Name string `json:"name"` + + // AuthAlgorithm is the authentication hash algorithm + AuthAlgorithm string `json:"auth_algorithm"` + + // EncryptionAlgorithm is the encryption algorithm + EncryptionAlgorithm string `json:"encryption_algorithm"` + + // PFS is the Perfect forward secrecy (PFS) mode + PFS string `json:"pfs"` + + // Lifetime is the lifetime of the security association + Lifetime Lifetime `json:"lifetime"` + + // ID is the ID of the policy + ID string `json:"id"` + + // Phase1NegotiationMode is the IKE mode + Phase1NegotiationMode string `json:"phase1_negotiation_mode"` + + // IKEVersion is the IKE version. 
+ IKEVersion string `json:"ike_version"` +} + +type commonResult struct { + gophercloud.Result +} +type Lifetime struct { + // Units is the unit for the lifetime + // Default is seconds + Units string `json:"units"` + + // Value is the lifetime + // Default is 3600 + Value int `json:"value"` +} + +// Extract is a function that accepts a result and extracts an IKE Policy. +func (r commonResult) Extract() (*Policy, error) { + var s struct { + Policy *Policy `json:"ikepolicy"` + } + err := r.ExtractInto(&s) + return s.Policy, err +} + +// PolicyPage is the page returned by a pager when traversing over a +// collection of Policies. +type PolicyPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of IKE policies has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r PolicyPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"ikepolicies_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PolicyPage struct is empty. +func (r PolicyPage) IsEmpty() (bool, error) { + is, err := ExtractPolicies(r) + return len(is) == 0, err +} + +// ExtractPolicies accepts a Page struct, specifically a Policy struct, +// and extracts the elements into a slice of Policy structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPolicies(r pagination.Page) ([]Policy, error) { + var s struct { + Policies []Policy `json:"ikepolicies"` + } + err := (r.(PolicyPage)).ExtractInto(&s) + return s.Policies, err +} + +// CreateResult represents the result of a Create operation. Call its Extract method to +// interpret it as a Policy. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a Get operation. Call its Extract method to +// interpret it as a Policy. +type GetResult struct { + commonResult +} + +// DeleteResult represents the results of a Delete operation. Call its ExtractErr method +// to determine whether the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. Call its Extract method +// to interpret it as a Policy. 
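Because every result type here embeds commonResult, Create, Get, and Update all share the Extract helper defined above. The pointer fields on UpdateOpts (declared earlier in requests.go) are what make partial updates possible: fields left nil should be omitted from the request body entirely. A hedged sketch of that behaviour using ToPolicyUpdateMap directly; the exact printed form is indicative only:

	name := "updatedname"
	opts := ikepolicies.UpdateOpts{Name: &name} // Description, Lifetime, etc. left nil

	body, err := opts.ToPolicyUpdateMap()
	if err != nil {
		// handle error
	}
	// Only the populated field should appear in the body, e.g.:
	// map[ikepolicy:map[name:updatedname]]
	fmt.Printf("%v\n", body)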
+type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/testing/requests_test.go new file mode 100644 index 000000000000..b3ec548da7a6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/testing/requests_test.go @@ -0,0 +1,304 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ikepolicies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "ikepolicy":{ + "name": "policy", + "description": "IKE policy", + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "ike_version": "v2" + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "ikepolicy":{ + "name": "policy", + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "project_id": "9145d91459d248b1b02fdaca97c6a75d", + "id": "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + "description": "IKE policy", + "auth_algorithm": "sha1", + "encryption_algorithm": "aes-128", + "pfs": "Group5", + "lifetime": { + "value": 3600, + "units": "seconds" + }, + "phase1_negotiation_mode": "main", + "ike_version": "v2" + } +} + `) + }) + + options := ikepolicies.CreateOpts{ + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Name: "policy", + Description: "IKE policy", + IKEVersion: ikepolicies.IKEVersionv2, + } + + actual, err := ikepolicies.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + expectedLifetime := ikepolicies.Lifetime{ + Units: "seconds", + Value: 3600, + } + expected := ikepolicies.Policy{ + AuthAlgorithm: "sha1", + IKEVersion: "v2", + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + Phase1NegotiationMode: "main", + PFS: "Group5", + EncryptionAlgorithm: "aes-128", + Description: "IKE policy", + Name: "policy", + ID: "f2b08c1e-aa81-4668-8ae1-1401bcb0576c", + Lifetime: expectedLifetime, + ProjectID: "9145d91459d248b1b02fdaca97c6a75d", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ikepolicies/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ikepolicy":{ + "name": "policy", + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "project_id": "9145d91459d248b1b02fdaca97c6a75d", + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "IKE policy", + "auth_algorithm": "sha1", + "encryption_algorithm": "aes-128", + "pfs": "Group5", + "lifetime": { + "value": 3600, + "units": "seconds" + }, + "phase1_negotiation_mode": "main", + 
"ike_version": "v2" + } +} + `) + }) + + actual, err := ikepolicies.Get(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828").Extract() + th.AssertNoErr(t, err) + expectedLifetime := ikepolicies.Lifetime{ + Units: "seconds", + Value: 3600, + } + expected := ikepolicies.Policy{ + AuthAlgorithm: "sha1", + IKEVersion: "v2", + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + ProjectID: "9145d91459d248b1b02fdaca97c6a75d", + Phase1NegotiationMode: "main", + PFS: "Group5", + EncryptionAlgorithm: "aes-128", + Description: "IKE policy", + Name: "policy", + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + Lifetime: expectedLifetime, + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ikepolicies/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := ikepolicies.Delete(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828") + th.AssertNoErr(t, res.Err) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ikepolicies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "ikepolicies": [ + { + "name": "policy", + "tenant_id": "9145d91459d248b1b02fdaca97c6a75d", + "project_id": "9145d91459d248b1b02fdaca97c6a75d", + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "IKE policy", + "auth_algorithm": "sha1", + "encryption_algorithm": "aes-128", + "pfs": "Group5", + "lifetime": { + "value": 3600, + "units": "seconds" + }, + "phase1_negotiation_mode": "main", + "ike_version": "v2" + } + ] +} + `) + }) + + count := 0 + + ikepolicies.List(fake.ServiceClient(), ikepolicies.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ikepolicies.ExtractPolicies(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + expectedLifetime := ikepolicies.Lifetime{ + Units: "seconds", + Value: 3600, + } + expected := []ikepolicies.Policy{ + { + AuthAlgorithm: "sha1", + IKEVersion: "v2", + TenantID: "9145d91459d248b1b02fdaca97c6a75d", + ProjectID: "9145d91459d248b1b02fdaca97c6a75d", + Phase1NegotiationMode: "main", + PFS: "Group5", + EncryptionAlgorithm: "aes-128", + Description: "IKE policy", + Name: "policy", + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + Lifetime: expectedLifetime, + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ikepolicies/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "ikepolicy":{ + "name": "updatedname", + "description": "updated policy", + "lifetime": { + "value": 7000 + } + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ikepolicy": { + 
"name": "updatedname", + "transform_protocol": "esp", + "auth_algorithm": "sha1", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "project_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "lifetime": { + "units": "seconds", + "value": 7000 + }, + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "updated policy" + } +} +`) + }) + + updatedName := "updatedname" + updatedDescription := "updated policy" + options := ikepolicies.UpdateOpts{ + Name: &updatedName, + Description: &updatedDescription, + Lifetime: &ikepolicies.LifetimeUpdateOpts{ + Value: 7000, + }, + } + + actual, err := ikepolicies.Update(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828", options).Extract() + th.AssertNoErr(t, err) + expectedLifetime := ikepolicies.Lifetime{ + Units: "seconds", + Value: 7000, + } + expected := ikepolicies.Policy{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + ProjectID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "updatedname", + AuthAlgorithm: "sha1", + EncryptionAlgorithm: "aes-128", + PFS: "group5", + Description: "updated policy", + Lifetime: expectedLifetime, + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + } + th.AssertDeepEquals(t, expected, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/urls.go new file mode 100644 index 000000000000..a364a881e64c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ikepolicies/urls.go @@ -0,0 +1,16 @@ +package ikepolicies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "vpn" + resourcePath = "ikepolicies" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/doc.go new file mode 100644 index 000000000000..91d5451a6e88 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/doc.go @@ -0,0 +1,56 @@ +/* +Package ipsecpolicies allows management and retrieval of IPSec Policies in the +OpenStack Networking Service. 
+
+Example to Create a Policy
+
+	createOpts := ipsecpolicies.CreateOpts{
+		Name: "IPSecPolicy_1",
+	}
+
+	policy, err := ipsecpolicies.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Policy
+
+	err := ipsecpolicies.Delete(client, "5291b189-fd84-46e5-84bd-78f40c05d69c").ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific IPSec policy by ID
+
+	policy, err := ipsecpolicies.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update an IPSec policy
+
+	name := "updatedname"
+	description := "updated policy"
+	updateOpts := ipsecpolicies.UpdateOpts{
+		Name:        &name,
+		Description: &description,
+	}
+	updatedPolicy, err := ipsecpolicies.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to List IPSec policies
+
+	allPages, err := ipsecpolicies.List(client, nil).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allPolicies, err := ipsecpolicies.ExtractPolicies(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package ipsecpolicies
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/requests.go
new file mode 100644
index 000000000000..9496365ca46f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/requests.go
@@ -0,0 +1,211 @@
+package ipsecpolicies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type TransformProtocol string
+type AuthAlgorithm string
+type EncapsulationMode string
+type EncryptionAlgorithm string
+type PFS string
+type Unit string
+
+const (
+	TransformProtocolESP       TransformProtocol   = "esp"
+	TransformProtocolAH        TransformProtocol   = "ah"
+	TransformProtocolAHESP     TransformProtocol   = "ah-esp"
+	AuthAlgorithmSHA1          AuthAlgorithm       = "sha1"
+	AuthAlgorithmSHA256        AuthAlgorithm       = "sha256"
+	AuthAlgorithmSHA384        AuthAlgorithm       = "sha384"
+	AuthAlgorithmSHA512        AuthAlgorithm       = "sha512"
+	EncryptionAlgorithm3DES    EncryptionAlgorithm = "3des"
+	EncryptionAlgorithmAES128  EncryptionAlgorithm = "aes-128"
+	EncryptionAlgorithmAES256  EncryptionAlgorithm = "aes-256"
+	EncryptionAlgorithmAES192  EncryptionAlgorithm = "aes-192"
+	EncapsulationModeTunnel    EncapsulationMode   = "tunnel"
+	EncapsulationModeTransport EncapsulationMode   = "transport"
+	UnitSeconds                Unit                = "seconds"
+	UnitKilobytes              Unit                = "kilobytes"
+	PFSGroup2                  PFS                 = "group2"
+	PFSGroup5                  PFS                 = "group5"
+	PFSGroup14                 PFS                 = "group14"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToPolicyCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new IPSec policy
+type CreateOpts struct {
+	// TenantID specifies a tenant to own the IPSec policy. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// Description is the human readable description of the policy.
+	Description string `json:"description,omitempty"`
+
+	// Name is the human readable name of the policy.
+	// Does not have to be unique.
+ Name string `json:"name,omitempty"` + + // AuthAlgorithm is the authentication hash algorithm. + // Valid values are sha1, sha256, sha384, sha512. + // The default is sha1. + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + + // EncapsulationMode is the encapsulation mode. + // A valid value is tunnel or transport. + // Default is tunnel. + EncapsulationMode EncapsulationMode `json:"encapsulation_mode,omitempty"` + + // EncryptionAlgorithm is the encryption algorithm. + // A valid value is 3des, aes-128, aes-192, aes-256, and so on. + // Default is aes-128. + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + + // PFS is the Perfect forward secrecy mode. + // A valid value is Group2, Group5, Group14, and so on. + // Default is Group5. + PFS PFS `json:"pfs,omitempty"` + + // TransformProtocol is the transform protocol. + // A valid value is ESP, AH, or AH- ESP. + // Default is ESP. + TransformProtocol TransformProtocol `json:"transform_protocol,omitempty"` + + //Lifetime is the lifetime of the security association + Lifetime *LifetimeCreateOpts `json:"lifetime,omitempty"` +} + +// The lifetime consists of a unit and integer value +// You can omit either the unit or value portion of the lifetime +type LifetimeCreateOpts struct { + // Units is the units for the lifetime of the security association + // Default unit is seconds + Units Unit `json:"units,omitempty"` + + // The lifetime value. + // Must be a positive integer. + // Default value is 3600. + Value int `json:"value,omitempty"` +} + +// ToPolicyCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToPolicyCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ipsecpolicy") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// IPSec policy +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPolicyCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + return +} + +// Delete will permanently delete a particular IPSec policy based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// Get retrieves a particular IPSec policy based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPolicyListQuery() (string, error) +} + +// ListOpts allows the filtering of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the IPSec policy attributes you want to see returned. +type ListOpts struct { + TenantID string `q:"tenant_id"` + Name string `q:"name"` + Description string `q:"description"` + ProjectID string `q:"project_id"` + AuthAlgorithm string `q:"auth_algorithm"` + EncapsulationMode string `q:"encapsulation_mode"` + EncryptionAlgorithm string `q:"encryption_algorithm"` + PFS string `q:"pfs"` + TransformProtocol string `q:"transform_protocol"` +} + +// ToPolicyListQuery formats a ListOpts into a query string. 
+func (opts ListOpts) ToPolicyListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// IPSec policies. It accepts a ListOpts struct, which allows you to filter +// the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToPolicyListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PolicyPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPolicyUpdateMap() (map[string]interface{}, error) +} + +type LifetimeUpdateOpts struct { + Units Unit `json:"units,omitempty"` + Value int `json:"value,omitempty"` +} + +// UpdateOpts contains the values used when updating an IPSec policy +type UpdateOpts struct { + Description *string `json:"description,omitempty"` + Name *string `json:"name,omitempty"` + AuthAlgorithm AuthAlgorithm `json:"auth_algorithm,omitempty"` + EncapsulationMode EncapsulationMode `json:"encapsulation_mode,omitempty"` + EncryptionAlgorithm EncryptionAlgorithm `json:"encryption_algorithm,omitempty"` + PFS PFS `json:"pfs,omitempty"` + TransformProtocol TransformProtocol `json:"transform_protocol,omitempty"` + Lifetime *LifetimeUpdateOpts `json:"lifetime,omitempty"` +} + +// ToPolicyUpdateMap casts an UpdateOpts struct to a map. +func (opts UpdateOpts) ToPolicyUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ipsecpolicy") +} + +// Update allows IPSec policies to be updated. 
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToPolicyUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/results.go
new file mode 100644
index 000000000000..eda4a1bd233f
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/results.go
@@ -0,0 +1,126 @@
+package ipsecpolicies
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// Policy is an IPSec Policy
+type Policy struct {
+	// TenantID is the ID of the project
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the ID of the project
+	ProjectID string `json:"project_id"`
+
+	// Description is the human readable description of the policy
+	Description string `json:"description"`
+
+	// Name is the human readable name of the policy
+	Name string `json:"name"`
+
+	// AuthAlgorithm is the authentication hash algorithm
+	AuthAlgorithm string `json:"auth_algorithm"`
+
+	// EncapsulationMode is the encapsulation mode
+	EncapsulationMode string `json:"encapsulation_mode"`
+
+	// EncryptionAlgorithm is the encryption algorithm
+	EncryptionAlgorithm string `json:"encryption_algorithm"`
+
+	// PFS is the Perfect forward secrecy (PFS) mode
+	PFS string `json:"pfs"`
+
+	// TransformProtocol is the transform protocol
+	TransformProtocol string `json:"transform_protocol"`
+
+	// Lifetime is the lifetime of the security association
+	Lifetime Lifetime `json:"lifetime"`
+
+	// ID is the ID of the policy
+	ID string `json:"id"`
+}
+
+type Lifetime struct {
+	// Units is the unit for the lifetime
+	// Default is seconds
+	Units string `json:"units"`
+
+	// Value is the lifetime
+	// Default is 3600
+	Value int `json:"value"`
+}
+
+type commonResult struct {
+	gophercloud.Result
+}
+
+// Extract is a function that accepts a result and extracts an IPSec Policy.
+func (r commonResult) Extract() (*Policy, error) {
+	var s struct {
+		Policy *Policy `json:"ipsecpolicy"`
+	}
+	err := r.ExtractInto(&s)
+	return s.Policy, err
+}
+
+// CreateResult represents the result of a create operation. Call its Extract
+// method to interpret it as a Policy.
+type CreateResult struct {
+	commonResult
+}
+
+// DeleteResult represents the result of a delete operation. Call its ExtractErr method
+// to determine if the operation succeeded or failed.
+type DeleteResult struct {
+	gophercloud.ErrResult
+}
+
+// GetResult represents the result of a get operation. Call its Extract
+// method to interpret it as a Policy.
+type GetResult struct {
+	commonResult
+}
+
+// PolicyPage is the page returned by a pager when traversing over a
+// collection of Policies.
+type PolicyPage struct {
+	pagination.LinkedPageBase
+}
+
+// NextPageURL is invoked when a paginated collection of IPSec policies has
+// reached the end of a page and the pager seeks to traverse over a new one.
+// In order to do this, it needs to construct the next page's URL.
+func (r PolicyPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"ipsecpolicies_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PolicyPage struct is empty. +func (r PolicyPage) IsEmpty() (bool, error) { + is, err := ExtractPolicies(r) + return len(is) == 0, err +} + +// ExtractPolicies accepts a Page struct, specifically a Policy struct, +// and extracts the elements into a slice of Policy structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractPolicies(r pagination.Page) ([]Policy, error) { + var s struct { + Policies []Policy `json:"ipsecpolicies"` + } + err := (r.(PolicyPage)).ExtractInto(&s) + return s.Policies, err +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Policy. +type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/testing/requests_test.go new file mode 100644 index 000000000000..702bd38355cd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/testing/requests_test.go @@ -0,0 +1,321 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsecpolicies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "ipsecpolicy": { + "name": "ipsecpolicy1", + "transform_protocol": "esp", + "auth_algorithm": "sha1", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "lifetime": { + "units": "seconds", + "value": 7200 + }, + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b" +} +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "ipsecpolicy": { + "name": "ipsecpolicy1", + "transform_protocol": "esp", + "auth_algorithm": "sha1", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "project_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "lifetime": { + "units": "seconds", + "value": 7200 + }, + "id": "5291b189-fd84-46e5-84bd-78f40c05d69c", + "description": "" + } +} + `) + }) + + lifetime := ipsecpolicies.LifetimeCreateOpts{ + Units: ipsecpolicies.UnitSeconds, + Value: 7200, + } + options := ipsecpolicies.CreateOpts{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "ipsecpolicy1", + TransformProtocol: ipsecpolicies.TransformProtocolESP, + AuthAlgorithm: ipsecpolicies.AuthAlgorithmSHA1, + EncapsulationMode: ipsecpolicies.EncapsulationModeTunnel, + EncryptionAlgorithm: ipsecpolicies.EncryptionAlgorithmAES128, + PFS: ipsecpolicies.PFSGroup5, + 
Lifetime: &lifetime, + Description: "", + } + actual, err := ipsecpolicies.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + expectedLifetime := ipsecpolicies.Lifetime{ + Units: "seconds", + Value: 7200, + } + expected := ipsecpolicies.Policy{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "ipsecpolicy1", + TransformProtocol: "esp", + AuthAlgorithm: "sha1", + EncapsulationMode: "tunnel", + EncryptionAlgorithm: "aes-128", + PFS: "group5", + Description: "", + Lifetime: expectedLifetime, + ID: "5291b189-fd84-46e5-84bd-78f40c05d69c", + ProjectID: "b4eedccc6fb74fa8a7ad6b08382b852b", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsecpolicies/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ipsecpolicy": { + "name": "ipsecpolicy1", + "transform_protocol": "esp", + "auth_algorithm": "sha1", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "project_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "lifetime": { + "units": "seconds", + "value": 7200 + }, + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "" + } +} + `) + }) + + actual, err := ipsecpolicies.Get(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828").Extract() + th.AssertNoErr(t, err) + expectedLifetime := ipsecpolicies.Lifetime{ + Units: "seconds", + Value: 7200, + } + expected := ipsecpolicies.Policy{ + Name: "ipsecpolicy1", + TransformProtocol: "esp", + Description: "", + AuthAlgorithm: "sha1", + EncapsulationMode: "tunnel", + EncryptionAlgorithm: "aes-128", + PFS: "group5", + Lifetime: expectedLifetime, + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + ProjectID: "b4eedccc6fb74fa8a7ad6b08382b852b", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsecpolicies/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := ipsecpolicies.Delete(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828") + th.AssertNoErr(t, res.Err) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsecpolicies", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "ipsecpolicies": [ + { + "name": "ipsecpolicy1", + "transform_protocol": "esp", + "auth_algorithm": "sha1", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "project_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "lifetime": { + "units": "seconds", + "value": 7200 + }, + "id": "5291b189-fd84-46e5-84bd-78f40c05d69c", + "description": "" + } + ] +} + `) + }) + + count := 0 + + ipsecpolicies.List(fake.ServiceClient(), ipsecpolicies.ListOpts{}).EachPage(func(page 
pagination.Page) (bool, error) { + count++ + actual, err := ipsecpolicies.ExtractPolicies(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []ipsecpolicies.Policy{ + { + Name: "ipsecpolicy1", + TransformProtocol: "esp", + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + ProjectID: "b4eedccc6fb74fa8a7ad6b08382b852b", + AuthAlgorithm: "sha1", + EncapsulationMode: "tunnel", + EncryptionAlgorithm: "aes-128", + PFS: "group5", + Lifetime: ipsecpolicies.Lifetime{ + Value: 7200, + Units: "seconds", + }, + Description: "", + ID: "5291b189-fd84-46e5-84bd-78f40c05d69c", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsecpolicies/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "ipsecpolicy":{ + "name": "updatedname", + "description": "updated policy", + "lifetime": { + "value": 7000 + } + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + + { + "ipsecpolicy": { + "name": "updatedname", + "transform_protocol": "esp", + "auth_algorithm": "sha1", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "aes-128", + "pfs": "group5", + "project_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "tenant_id": "b4eedccc6fb74fa8a7ad6b08382b852b", + "lifetime": { + "units": "seconds", + "value": 7000 + }, + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "updated policy" + } +} +`) + }) + updatedName := "updatedname" + updatedDescription := "updated policy" + options := ipsecpolicies.UpdateOpts{ + Name: &updatedName, + Description: &updatedDescription, + Lifetime: &ipsecpolicies.LifetimeUpdateOpts{ + Value: 7000, + }, + } + + actual, err := ipsecpolicies.Update(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828", options).Extract() + th.AssertNoErr(t, err) + expectedLifetime := ipsecpolicies.Lifetime{ + Units: "seconds", + Value: 7000, + } + expected := ipsecpolicies.Policy{ + TenantID: "b4eedccc6fb74fa8a7ad6b08382b852b", + ProjectID: "b4eedccc6fb74fa8a7ad6b08382b852b", + Name: "updatedname", + TransformProtocol: "esp", + AuthAlgorithm: "sha1", + EncapsulationMode: "tunnel", + EncryptionAlgorithm: "aes-128", + PFS: "group5", + Description: "updated policy", + Lifetime: expectedLifetime, + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + } + th.AssertDeepEquals(t, expected, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/urls.go new file mode 100644 index 000000000000..8781cc449919 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/ipsecpolicies/urls.go @@ -0,0 +1,16 @@ +package ipsecpolicies + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "vpn" + resourcePath = "ipsecpolicies" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) 
string {
+	return c.ServiceURL(rootPath, resourcePath, id)
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/doc.go
new file mode 100644
index 000000000000..6bd3236c84a7
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/doc.go
@@ -0,0 +1,68 @@
+/*
+Package services allows management and retrieval of VPN services in the
+OpenStack Networking Service.
+
+Example to List Services
+
+	listOpts := services.ListOpts{
+		TenantID: "966b3c7d36a24facaf20b7e458bf2192",
+	}
+
+	allPages, err := services.List(networkClient, listOpts).AllPages()
+	if err != nil {
+		panic(err)
+	}
+
+	allServices, err := services.ExtractServices(allPages)
+	if err != nil {
+		panic(err)
+	}
+
+	for _, service := range allServices {
+		fmt.Printf("%+v\n", service)
+	}
+
+Example to Create a Service
+
+	createOpts := services.CreateOpts{
+		Name:         "vpnservice1",
+		Description:  "A service",
+		RouterID:     "2512e759-e8d7-4eea-a0af-4a85927a2e59",
+		AdminStateUp: gophercloud.Enabled,
+	}
+
+	service, err := services.Create(networkClient, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Update a Service
+
+	serviceID := "38aee955-6283-4279-b091-8b9c828000ec"
+	description := "New Description"
+
+	updateOpts := services.UpdateOpts{
+		Description: &description,
+	}
+
+	service, err := services.Update(networkClient, serviceID, updateOpts).Extract()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Delete a Service
+
+	serviceID := "38aee955-6283-4279-b091-8b9c828000ec"
+	err := services.Delete(networkClient, serviceID).ExtractErr()
+	if err != nil {
+		panic(err)
+	}
+
+Example to Show the details of a specific Service by ID
+
+	service, err := services.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract()
+	if err != nil {
+		panic(err)
+	}
+
+*/
+package services
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/requests.go
new file mode 100644
index 000000000000..8d642197e012
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/requests.go
@@ -0,0 +1,150 @@
+package services
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToServiceCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts contains all the values needed to create a new VPN service
+type CreateOpts struct {
+	// TenantID specifies a tenant to own the VPN service. The caller must have
+	// an admin role in order to set this. Otherwise, this field is left unset
+	// and the caller will be the owner.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// SubnetID is the ID of the subnet.
+	SubnetID string `json:"subnet_id,omitempty"`
+
+	// RouterID is the ID of the router.
+	RouterID string `json:"router_id" required:"true"`
+
+	// Description is the human readable description of the service.
+	Description string `json:"description,omitempty"`
+
+	// AdminStateUp is the administrative state of the resource, which is up (true) or down (false).
+	AdminStateUp *bool `json:"admin_state_up"`
+
+	// Name is the human readable name of the service.
+	Name string `json:"name,omitempty"`
+
+	// The ID of the flavor.
+	FlavorID string `json:"flavor_id,omitempty"`
+}
+
+// ToServiceCreateMap casts a CreateOpts struct to a map.
+func (opts CreateOpts) ToServiceCreateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vpnservice")
+}
+
+// Create accepts a CreateOpts struct and uses the values to create a new
+// VPN service.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToServiceCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(rootURL(c), b, &r.Body, nil)
+	return
+}
+
+// Delete will permanently delete a particular VPN service based on its
+// unique ID.
+func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) {
+	_, r.Err = c.Delete(resourceURL(c, id), nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToServiceUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts contains the values used when updating a VPN service
+type UpdateOpts struct {
+	// Name is the human readable name of the service.
+	Name *string `json:"name,omitempty"`
+
+	// Description is the human readable description of the service.
+	Description *string `json:"description,omitempty"`
+
+	// AdminStateUp is the administrative state of the resource, which is up (true) or down (false).
+	AdminStateUp *bool `json:"admin_state_up,omitempty"`
+}
+
+// ToServiceUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToServiceUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "vpnservice")
+}
+
+// Update allows VPN services to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToServiceUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
+
+// ListOptsBuilder allows extensions to add additional parameters to the
+// List request.
+type ListOptsBuilder interface {
+	ToServiceListQuery() (string, error)
+}
+
+// ListOpts allows the filtering and sorting of paginated collections through
+// the API. Filtering is achieved by passing in struct field values that map to
+// the VPN service attributes you want to see returned.
+type ListOpts struct {
+	TenantID     string `q:"tenant_id"`
+	Name         string `q:"name"`
+	Description  string `q:"description"`
+	AdminStateUp *bool  `q:"admin_state_up"`
+	Status       string `q:"status"`
+	SubnetID     string `q:"subnet_id"`
+	RouterID     string `q:"router_id"`
+	ProjectID    string `q:"project_id"`
+	ExternalV6IP string `q:"external_v6_ip"`
+	ExternalV4IP string `q:"external_v4_ip"`
+	FlavorID     string `q:"flavor_id"`
+}
+
+// ToServiceListQuery formats a ListOpts into a query string.
+func (opts ListOpts) ToServiceListQuery() (string, error) {
+	q, err := gophercloud.BuildQueryString(opts)
+	return q.String(), err
+}
+
+// List returns a Pager which allows you to iterate over a collection of
+// VPN services. It accepts a ListOpts struct, which allows you to filter
+// and sort the returned collection for greater efficiency.
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToServiceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ServicePage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a particular VPN service based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/results.go new file mode 100644 index 000000000000..5e555699fcf6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/results.go @@ -0,0 +1,121 @@ +package services + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Service is a VPN Service +type Service struct { + // TenantID is the ID of the project. + TenantID string `json:"tenant_id"` + + // ProjectID is the ID of the project. + ProjectID string `json:"project_id"` + + // SubnetID is the ID of the subnet. + SubnetID string `json:"subnet_id"` + + // RouterID is the ID of the router. + RouterID string `json:"router_id"` + + // Description is a human-readable description for the resource. + // Default is an empty string + Description string `json:"description"` + + // AdminStateUp is the administrative state of the resource, which is up (true) or down (false). + AdminStateUp bool `json:"admin_state_up"` + + // Name is the human readable name of the service. + Name string `json:"name"` + + // Status indicates whether IPsec VPN service is currently operational. + // Values are ACTIVE, DOWN, BUILD, ERROR, PENDING_CREATE, PENDING_UPDATE, or PENDING_DELETE. + Status string `json:"status"` + + // ID is the unique ID of the VPN service. + ID string `json:"id"` + + // ExternalV6IP is the read-only external (public) IPv6 address that is used for the VPN service. + ExternalV6IP string `json:"external_v6_ip"` + + // ExternalV4IP is the read-only external (public) IPv4 address that is used for the VPN service. + ExternalV4IP string `json:"external_v4_ip"` + + // FlavorID is the ID of the flavor. + FlavorID string `json:"flavor_id"` +} + +type commonResult struct { + gophercloud.Result +} + +// ServicePage is the page returned by a pager when traversing over a +// collection of VPN services. +type ServicePage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of VPN services has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r ServicePage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"vpnservices_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a ServicePage struct is empty. +func (r ServicePage) IsEmpty() (bool, error) { + is, err := ExtractServices(r) + return len(is) == 0, err +} + +// ExtractServices accepts a Page struct, specifically a Service struct, +// and extracts the elements into a slice of Service structs. 
In other words, +// a generic collection is mapped into a relevant slice. +func ExtractServices(r pagination.Page) ([]Service, error) { + var s struct { + Services []Service `json:"vpnservices"` + } + err := (r.(ServicePage)).ExtractInto(&s) + return s.Services, err +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Service. +type GetResult struct { + commonResult +} + +// Extract is a function that accepts a result and extracts a VPN service. +func (r commonResult) Extract() (*Service, error) { + var s struct { + Service *Service `json:"vpnservice"` + } + err := r.ExtractInto(&s) + return s.Service, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Service. +type CreateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a service. +type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/testing/requests_test.go new file mode 100644 index 000000000000..ca7adf327cfb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/testing/requests_test.go @@ -0,0 +1,267 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/vpnservices", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "vpnservice": { + "router_id": "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + "name": "vpn", + "admin_state_up": true, + "description": "OpenStack VPN service", + "tenant_id": "10039663455a446d8ba2cbb058b0f578" + } +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "vpnservice": { + "router_id": "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + "status": "PENDING_CREATE", + "name": "vpn", + "external_v6_ip": "2001:db8::1", + "admin_state_up": true, + "subnet_id": null, + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "external_v4_ip": "172.32.1.11", + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "OpenStack VPN service", + "project_id": "10039663455a446d8ba2cbb058b0f578" + } +} + `) + }) + + options := services.CreateOpts{ + TenantID: "10039663455a446d8ba2cbb058b0f578", + Name: "vpn", + Description: "OpenStack VPN service", + AdminStateUp: gophercloud.Enabled, + RouterID: "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + } + actual, err := services.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + 
expected := services.Service{ + RouterID: "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + Status: "PENDING_CREATE", + Name: "vpn", + ExternalV6IP: "2001:db8::1", + AdminStateUp: true, + SubnetID: "", + TenantID: "10039663455a446d8ba2cbb058b0f578", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + ExternalV4IP: "172.32.1.11", + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + Description: "OpenStack VPN service", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/vpnservices", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vpnservices":[ + { + "router_id": "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + "status": "PENDING_CREATE", + "name": "vpnservice1", + "admin_state_up": true, + "subnet_id": null, + "project_id": "10039663455a446d8ba2cbb058b0f578", + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "description": "Test VPN service" + } + ] +} + `) + }) + + count := 0 + + services.List(fake.ServiceClient(), services.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := services.ExtractServices(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expected := []services.Service{ + { + Status: "PENDING_CREATE", + Name: "vpnservice1", + AdminStateUp: true, + TenantID: "10039663455a446d8ba2cbb058b0f578", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + Description: "Test VPN service", + SubnetID: "", + RouterID: "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/vpnservices/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vpnservice": { + "router_id": "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + "status": "PENDING_CREATE", + "name": "vpnservice1", + "admin_state_up": true, + "subnet_id": null, + "project_id": "10039663455a446d8ba2cbb058b0f578", + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "VPN test service" + } +} + `) + }) + + actual, err := services.Get(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828").Extract() + th.AssertNoErr(t, err) + expected := services.Service{ + Status: "PENDING_CREATE", + Name: "vpnservice1", + Description: "VPN test service", + AdminStateUp: true, + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + TenantID: "10039663455a446d8ba2cbb058b0f578", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + RouterID: "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + SubnetID: "", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/vpnservices/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + res := 
services.Delete(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828") + th.AssertNoErr(t, res.Err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/vpnservices/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + "vpnservice":{ + "name": "updatedname", + "description": "updated service", + "admin_state_up": false + } +} + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "vpnservice": { + "router_id": "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + "status": "PENDING_CREATE", + "name": "updatedname", + "admin_state_up": false, + "subnet_id": null, + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "project_id": "10039663455a446d8ba2cbb058b0f578", + "id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "description": "updated service", + "external_v4_ip": "172.32.1.11", + "external_v6_ip": "2001:db8::1" + } +} + `) + }) + updatedName := "updatedname" + updatedServiceDescription := "updated service" + options := services.UpdateOpts{ + Name: &updatedName, + Description: &updatedServiceDescription, + AdminStateUp: gophercloud.Disabled, + } + + actual, err := services.Update(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828", options).Extract() + th.AssertNoErr(t, err) + expected := services.Service{ + RouterID: "66e3b16c-8ce5-40fb-bb49-ab6d8dc3f2aa", + Status: "PENDING_CREATE", + Name: "updatedname", + ExternalV6IP: "2001:db8::1", + AdminStateUp: false, + SubnetID: "", + TenantID: "10039663455a446d8ba2cbb058b0f578", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + ExternalV4IP: "172.32.1.11", + ID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + Description: "updated service", + } + th.AssertDeepEquals(t, expected, *actual) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/urls.go new file mode 100644 index 000000000000..fe8b343fe33d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/services/urls.go @@ -0,0 +1,16 @@ +package services + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "vpn" + resourcePath = "vpnservices" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/doc.go new file mode 100644 index 000000000000..66befd3ba26a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/doc.go @@ -0,0 +1,68 @@ +/* +Package siteconnections allows management and retrieval of IPSec site connections in the +OpenStack Networking Service. 
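+
+Example to create an IPSec site connection with custom dead peer detection
+
+A minimal sketch, assuming client is an authenticated *gophercloud.ServiceClient
+and that the referenced IKE policy, IPSec policy, and VPN service already exist;
+the DPD timings are illustrative only, and the fuller Create example below lists
+the remaining options:
+
+	dpd := siteconnections.DPDCreateOpts{
+		Action:   siteconnections.ActionRestart,
+		Timeout:  240,
+		Interval: 60,
+	}
+
+	createOpts := siteconnections.CreateOpts{
+		Name:          "Connection2",
+		PSK:           "secret",
+		Initiator:     siteconnections.InitiatorBiDirectional,
+		IKEPolicyID:   "47a880f9-1da9-468c-b289-219c9eca78f0",
+		IPSecPolicyID: "4ab0a72e-64ef-4809-be43-c3f7e0e5239b",
+		VPNServiceID:  "692c1ec8-a7cd-44d9-972b-8ed3fe4cc476",
+		PeerAddress:   "172.24.4.233",
+		PeerID:        "172.24.4.233",
+		DPD:           &dpd,
+	}
+
+	connection, err := siteconnections.Create(client, createOpts).Extract()
+	if err != nil {
+		panic(err)
+	}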
+ + +Example to create an IPSec site connection + +createOpts := siteconnections.CreateOpts{ + Name: "Connection1", + PSK: "secret", + Initiator: siteconnections.InitiatorBiDirectional, + AdminStateUp: gophercloud.Enabled, + IPSecPolicyID: "4ab0a72e-64ef-4809-be43-c3f7e0e5239b", + PeerEPGroupID: "5f5801b1-b383-4cf0-bf61-9e85d4044b2d", + IKEPolicyID: "47a880f9-1da9-468c-b289-219c9eca78f0", + VPNServiceID: "692c1ec8-a7cd-44d9-972b-8ed3fe4cc476", + LocalEPGroupID: "498bb96a-1517-47ea-b1eb-c4a53db46a16", + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + MTU: 1500, + } + connection, err := siteconnections.Create(client, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Show the details of a specific IPSec site connection by ID + + conn, err := siteconnections.Get(client, "f2b08c1e-aa81-4668-8ae1-1401bcb0576c").Extract() + if err != nil { + panic(err) + } + +Example to Delete a site connection + + connID := "38aee955-6283-4279-b091-8b9c828000ec" + err := siteconnections.Delete(networkClient, connID).ExtractErr() + if err != nil { + panic(err) + } + +Example to List site connections + + allPages, err := siteconnections.List(client, nil).AllPages() + if err != nil { + panic(err) + } + + allConnections, err := siteconnections.ExtractConnections(allPages) + if err != nil { + panic(err) + } + +Example to Update an IPSec site connection + + description := "updated connection" + name := "updatedname" + updateOpts := siteconnections.UpdateOpts{ + Name: &name, + Description: &description, + } + updatedConnection, err := siteconnections.Update(client, "5c561d9d-eaea-45f6-ae3e-08d1a7080828", updateOpts).Extract() + if err != nil { + panic(err) + } + +*/ +package siteconnections diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/requests.go new file mode 100644 index 000000000000..15ce54b5fbaf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/requests.go @@ -0,0 +1,243 @@ +package siteconnections + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToConnectionCreateMap() (map[string]interface{}, error) +} +type Action string +type Initiator string + +const ( + ActionHold Action = "hold" + ActionClear Action = "clear" + ActionRestart Action = "restart" + ActionDisabled Action = "disabled" + ActionRestartByPeer Action = "restart-by-peer" + InitiatorBiDirectional Initiator = "bi-directional" + InitiatorResponseOnly Initiator = "response-only" +) + +// DPDCreateOpts contains all the values needed to create a valid configuration for Dead Peer detection protocols +type DPDCreateOpts struct { + // The dead peer detection (DPD) action. + // A valid value is clear, hold, restart, disabled, or restart-by-peer. + // Default value is hold. + Action Action `json:"action,omitempty"` + + // The dead peer detection (DPD) timeout in seconds. + // A valid value is a positive integer that is greater than the DPD interval value. + // Default is 120. + Timeout int `json:"timeout,omitempty"` + + // The dead peer detection (DPD) interval, in seconds. + // A valid value is a positive integer. + // Default is 30. 
+ Interval int `json:"interval,omitempty"` +} + +// CreateOpts contains all the values needed to create a new IPSec site connection +type CreateOpts struct { + // The ID of the IKE policy + IKEPolicyID string `json:"ikepolicy_id"` + + // The ID of the VPN Service + VPNServiceID string `json:"vpnservice_id"` + + // The ID for the endpoint group that contains private subnets for the local side of the connection. + // You must specify this parameter with the peer_ep_group_id parameter unless + // in backward- compatible mode where peer_cidrs is provided with a subnet_id for the VPN service. + LocalEPGroupID string `json:"local_ep_group_id,omitempty"` + + // The ID of the IPsec policy. + IPSecPolicyID string `json:"ipsecpolicy_id"` + + // The peer router identity for authentication. + // A valid value is an IPv4 address, IPv6 address, e-mail address, key ID, or FQDN. + // Typically, this value matches the peer_address value. + PeerID string `json:"peer_id"` + + // The ID of the project + TenantID string `json:"tenant_id,omitempty"` + + // The ID for the endpoint group that contains private CIDRs in the form < net_address > / < prefix > + // for the peer side of the connection. + // You must specify this parameter with the local_ep_group_id parameter unless in backward-compatible mode + // where peer_cidrs is provided with a subnet_id for the VPN service. + PeerEPGroupID string `json:"peer_ep_group_id,omitempty"` + + // An ID to be used instead of the external IP address for a virtual router used in traffic between instances on different networks in east-west traffic. + // Most often, local ID would be domain name, email address, etc. + // If this is not configured then the external IP address will be used as the ID. + LocalID string `json:"local_id,omitempty"` + + // The human readable name of the connection. + // Does not have to be unique. + // Default is an empty string + Name string `json:"name,omitempty"` + + // The human readable description of the connection. + // Does not have to be unique. + // Default is an empty string + Description string `json:"description,omitempty"` + + // The peer gateway public IPv4 or IPv6 address or FQDN. + PeerAddress string `json:"peer_address"` + + // The pre-shared key. + // A valid value is any string. + PSK string `json:"psk"` + + // Indicates whether this VPN can only respond to connections or both respond to and initiate connections. + // A valid value is response-only or bi-directional. Default is bi-directional. + Initiator Initiator `json:"initiator,omitempty"` + + // Unique list of valid peer private CIDRs in the form < net_address > / < prefix > . + PeerCIDRs []string `json:"peer_cidrs,omitempty"` + + // The administrative state of the resource, which is up (true) or down (false). + // Default is false + AdminStateUp *bool `json:"admin_state_up,omitempty"` + + // A dictionary with dead peer detection (DPD) protocol controls. + DPD *DPDCreateOpts `json:"dpd,omitempty"` + + // The maximum transmission unit (MTU) value to address fragmentation. + // Minimum value is 68 for IPv4, and 1280 for IPv6. + MTU int `json:"mtu,omitempty"` +} + +// ToConnectionCreateMap casts a CreateOpts struct to a map. +func (opts CreateOpts) ToConnectionCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "ipsec_site_connection") +} + +// Create accepts a CreateOpts struct and uses the values to create a new +// IPSec site connection. 
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToConnectionCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(rootURL(c), b, &r.Body, nil) + + return +} + +// Delete will permanently delete a particular IPSec site connection based on its +// unique ID. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(resourceURL(c, id), nil) + return +} + +// Get retrieves a particular IPSec site connection based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(resourceURL(c, id), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToConnectionListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the IPSec site connection attributes you want to see returned. +type ListOpts struct { + IKEPolicyID string `q:"ikepolicy_id"` + VPNServiceID string `q:"vpnservice_id"` + LocalEPGroupID string `q:"local_ep_group_id"` + IPSecPolicyID string `q:"ipsecpolicy_id"` + PeerID string `q:"peer_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + PeerEPGroupID string `q:"peer_ep_group_id"` + LocalID string `q:"local_id"` + Name string `q:"name"` + Description string `q:"description"` + PeerAddress string `q:"peer_address"` + PSK string `q:"psk"` + Initiator Initiator `q:"initiator"` + AdminStateUp *bool `q:"admin_state_up"` + MTU int `q:"mtu"` +} + +// ToConnectionListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToConnectionListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// IPSec site connections. It accepts a ListOpts struct, which allows you to filter +// and sort the returned collection for greater efficiency. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := rootURL(c) + if opts != nil { + query, err := opts.ToConnectionListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return ConnectionPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. 
+type UpdateOptsBuilder interface {
+	ToConnectionUpdateMap() (map[string]interface{}, error)
+}
+
+// DPDUpdateOpts contains the values used when updating the DPD of an IPSec site connection
+type DPDUpdateOpts struct {
+	Action   Action `json:"action,omitempty"`
+	Timeout  int    `json:"timeout,omitempty"`
+	Interval int    `json:"interval,omitempty"`
+}
+
+// UpdateOpts contains the values used when updating an IPSec site connection
+type UpdateOpts struct {
+	Description    *string        `json:"description,omitempty"`
+	Name           *string        `json:"name,omitempty"`
+	LocalID        string         `json:"local_id,omitempty"`
+	PeerAddress    string         `json:"peer_address,omitempty"`
+	PeerID         string         `json:"peer_id,omitempty"`
+	PeerCIDRs      []string       `json:"peer_cidrs,omitempty"`
+	LocalEPGroupID string         `json:"local_ep_group_id,omitempty"`
+	PeerEPGroupID  string         `json:"peer_ep_group_id,omitempty"`
+	MTU            int            `json:"mtu,omitempty"`
+	Initiator      Initiator      `json:"initiator,omitempty"`
+	PSK            string         `json:"psk,omitempty"`
+	DPD            *DPDUpdateOpts `json:"dpd,omitempty"`
+	AdminStateUp   *bool          `json:"admin_state_up,omitempty"`
+}
+
+// ToConnectionUpdateMap casts an UpdateOpts struct to a map.
+func (opts UpdateOpts) ToConnectionUpdateMap() (map[string]interface{}, error) {
+	return gophercloud.BuildRequestBody(opts, "ipsec_site_connection")
+}
+
+// Update allows IPSec site connections to be updated.
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) {
+	b, err := opts.ToConnectionUpdateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Put(resourceURL(c, id), b, &r.Body, &gophercloud.RequestOpts{
+		OkCodes: []int{200},
+	})
+	return
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/results.go
new file mode 100644
index 000000000000..3c09e4d074dd
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/results.go
@@ -0,0 +1,163 @@
+package siteconnections
+
+import (
+	"github.com/gophercloud/gophercloud"
+	"github.com/gophercloud/gophercloud/pagination"
+)
+
+type DPD struct {
+	// Action is the dead peer detection (DPD) action.
+	Action string `json:"action"`
+
+	// Timeout is the dead peer detection (DPD) timeout in seconds.
+	Timeout int `json:"timeout"`
+
+	// Interval is the dead peer detection (DPD) interval in seconds.
+	Interval int `json:"interval"`
+}
+
+// Connection is an IPSec site connection
+type Connection struct {
+	// IKEPolicyID is the ID of the IKE policy.
+	IKEPolicyID string `json:"ikepolicy_id"`
+
+	// VPNServiceID is the ID of the VPN service.
+	VPNServiceID string `json:"vpnservice_id"`
+
+	// LocalEPGroupID is the ID for the endpoint group that contains private subnets for the local side of the connection.
+	LocalEPGroupID string `json:"local_ep_group_id"`
+
+	// IPSecPolicyID is the ID of the IPSec policy
+	IPSecPolicyID string `json:"ipsecpolicy_id"`
+
+	// PeerID is the peer router identity for authentication.
+	PeerID string `json:"peer_id"`
+
+	// TenantID is the ID of the project.
+	TenantID string `json:"tenant_id"`
+
+	// ProjectID is the ID of the project.
+	ProjectID string `json:"project_id"`
+
+	// PeerEPGroupID is the ID for the endpoint group that contains private CIDRs in the form < net_address > / < prefix >
+	// for the peer side of the connection.
+ PeerEPGroupID string `json:"peer_ep_group_id"` + + // LocalID is an ID to be used instead of the external IP address for a virtual router used in traffic + // between instances on different networks in east-west traffic. + LocalID string `json:"local_id"` + + // Name is the human readable name of the connection. + Name string `json:"name"` + + // Description is the human readable description of the connection. + Description string `json:"description"` + + // PeerAddress is the peer gateway public IPv4 or IPv6 address or FQDN. + PeerAddress string `json:"peer_address"` + + // RouteMode is the route mode. + RouteMode string `json:"route_mode"` + + // PSK is the pre-shared key. + PSK string `json:"psk"` + + // Initiator indicates whether this VPN can only respond to connections or both respond to and initiate connections. + Initiator string `json:"initiator"` + + // PeerCIDRs is a unique list of valid peer private CIDRs in the form < net_address > / < prefix > . + PeerCIDRs []string `json:"peer_cidrs"` + + // AdminStateUp is the administrative state of the connection. + AdminStateUp bool `json:"admin_state_up"` + + // DPD is the dead peer detection (DPD) protocol controls. + DPD DPD `json:"dpd"` + + // AuthMode is the authentication mode. + AuthMode string `json:"auth_mode"` + + // MTU is the maximum transmission unit (MTU) value to address fragmentation. + MTU int `json:"mtu"` + + // Status indicates whether the IPsec connection is currently operational. + // Values are ACTIVE, DOWN, BUILD, ERROR, PENDING_CREATE, PENDING_UPDATE, or PENDING_DELETE. + Status string `json:"status"` + + // ID is the id of the connection + ID string `json:"id"` +} + +type commonResult struct { + gophercloud.Result +} + +// ConnectionPage is the page returned by a pager when traversing over a +// collection of IPSec site connections. +type ConnectionPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of IPSec site connections has +// reached the end of a page and the pager seeks to traverse over a new one. +// In order to do this, it needs to construct the next page's URL. +func (r ConnectionPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"ipsec_site_connections_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a ConnectionPage struct is empty. +func (r ConnectionPage) IsEmpty() (bool, error) { + is, err := ExtractConnections(r) + return len(is) == 0, err +} + +// ExtractConnections accepts a Page struct, specifically a Connection struct, +// and extracts the elements into a slice of Connection structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractConnections(r pagination.Page) ([]Connection, error) { + var s struct { + Connections []Connection `json:"ipsec_site_connections"` + } + err := (r.(ConnectionPage)).ExtractInto(&s) + return s.Connections, err +} + +// Extract is a function that accepts a result and extracts an IPSec site connection. +func (r commonResult) Extract() (*Connection, error) { + var s struct { + Connection *Connection `json:"ipsec_site_connection"` + } + err := r.ExtractInto(&s) + return s.Connection, err +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Connection. +type CreateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. 
Call its +// ExtractErr method to determine if the operation succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Connection. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a connection +type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/testing/requests_test.go new file mode 100644 index 000000000000..3db27364df13 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/testing/requests_test.go @@ -0,0 +1,413 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsec-site-connections", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` +{ + + "ipsec_site_connection": { + "psk": "secret", + "initiator": "bi-directional", + "ipsecpolicy_id": "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + "admin_state_up": true, + "mtu": 1500, + "peer_ep_group_id": "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + "ikepolicy_id": "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + "vpnservice_id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "local_ep_group_id": "3e1815dd-e212-43d0-8f13-b494fa553e68", + "peer_address": "172.24.4.233", + "peer_id": "172.24.4.233", + "name": "vpnconnection1" + +} +} `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, ` +{ + "ipsec_site_connection": { + "status": "PENDING_CREATE", + "psk": "secret", + "initiator": "bi-directional", + "name": "vpnconnection1", + "admin_state_up": true, + "project_id": "10039663455a446d8ba2cbb058b0f578", + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "auth_mode": "psk", + "peer_cidrs": [], + "mtu": 1500, + "peer_ep_group_id": "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + "ikepolicy_id": "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + "vpnservice_id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "dpd": { + "action": "hold", + "interval": 30, + "timeout": 120 + }, + "route_mode": "static", + "ipsecpolicy_id": "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + "local_ep_group_id": "3e1815dd-e212-43d0-8f13-b494fa553e68", + "peer_address": "172.24.4.233", + "peer_id": "172.24.4.233", + "id": "851f280f-5639-4ea3-81aa-e298525ab74b", + "description": "" + } +} + `) + }) + + options := siteconnections.CreateOpts{ + Name: "vpnconnection1", + AdminStateUp: gophercloud.Enabled, + PSK: "secret", + Initiator: siteconnections.InitiatorBiDirectional, + IPSecPolicyID: "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + MTU: 1500, + PeerEPGroupID: 
"9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + IKEPolicyID: "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + VPNServiceID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + LocalEPGroupID: "3e1815dd-e212-43d0-8f13-b494fa553e68", + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + } + actual, err := siteconnections.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + expectedDPD := siteconnections.DPD{ + Action: "hold", + Interval: 30, + Timeout: 120, + } + expected := siteconnections.Connection{ + TenantID: "10039663455a446d8ba2cbb058b0f578", + Name: "vpnconnection1", + AdminStateUp: true, + PSK: "secret", + Initiator: "bi-directional", + IPSecPolicyID: "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + MTU: 1500, + PeerEPGroupID: "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + IKEPolicyID: "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + VPNServiceID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + LocalEPGroupID: "3e1815dd-e212-43d0-8f13-b494fa553e68", + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + Status: "PENDING_CREATE", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + AuthMode: "psk", + PeerCIDRs: []string{}, + DPD: expectedDPD, + RouteMode: "static", + ID: "851f280f-5639-4ea3-81aa-e298525ab74b", + Description: "", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsec-site-connections/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := siteconnections.Delete(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828") + th.AssertNoErr(t, res.Err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsec-site-connections/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ipsec_site_connection": { + "status": "PENDING_CREATE", + "psk": "secret", + "initiator": "bi-directional", + "name": "vpnconnection1", + "admin_state_up": true, + "project_id": "10039663455a446d8ba2cbb058b0f578", + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "auth_mode": "psk", + "peer_cidrs": [], + "mtu": 1500, + "peer_ep_group_id": "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + "ikepolicy_id": "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + "vpnservice_id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "dpd": { + "action": "hold", + "interval": 30, + "timeout": 120 + }, + "route_mode": "static", + "ipsecpolicy_id": "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + "local_ep_group_id": "3e1815dd-e212-43d0-8f13-b494fa553e68", + "peer_address": "172.24.4.233", + "peer_id": "172.24.4.233", + "id": "851f280f-5639-4ea3-81aa-e298525ab74b", + "description": "" + } +} + `) + }) + + actual, err := siteconnections.Get(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828").Extract() + th.AssertNoErr(t, err) + expectedDPD := siteconnections.DPD{ + Action: "hold", + Interval: 30, + Timeout: 120, + } + expected := siteconnections.Connection{ + TenantID: "10039663455a446d8ba2cbb058b0f578", + Name: "vpnconnection1", + AdminStateUp: true, + PSK: "secret", + Initiator: "bi-directional", + IPSecPolicyID: "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + MTU: 1500, + PeerEPGroupID: 
"9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + IKEPolicyID: "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + VPNServiceID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + LocalEPGroupID: "3e1815dd-e212-43d0-8f13-b494fa553e68", + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + Status: "PENDING_CREATE", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + AuthMode: "psk", + PeerCIDRs: []string{}, + DPD: expectedDPD, + RouteMode: "static", + ID: "851f280f-5639-4ea3-81aa-e298525ab74b", + Description: "", + } + th.AssertDeepEquals(t, expected, *actual) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsec-site-connections", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "ipsec_site_connections":[ + { + "status": "PENDING_CREATE", + "psk": "secret", + "initiator": "bi-directional", + "name": "vpnconnection1", + "admin_state_up": true, + "project_id": "10039663455a446d8ba2cbb058b0f578", + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "auth_mode": "psk", + "peer_cidrs": [], + "mtu": 1500, + "peer_ep_group_id": "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + "ikepolicy_id": "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + "vpnservice_id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "dpd": { + "action": "hold", + "interval": 30, + "timeout": 120 + }, + "route_mode": "static", + "ipsecpolicy_id": "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + "local_ep_group_id": "3e1815dd-e212-43d0-8f13-b494fa553e68", + "peer_address": "172.24.4.233", + "peer_id": "172.24.4.233", + "id": "851f280f-5639-4ea3-81aa-e298525ab74b", + "description": "" + }] +} + `) + }) + + count := 0 + + siteconnections.List(fake.ServiceClient(), siteconnections.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := siteconnections.ExtractConnections(page) + if err != nil { + t.Errorf("Failed to extract members: %v", err) + return false, err + } + + expectedDPD := siteconnections.DPD{ + Action: "hold", + Interval: 30, + Timeout: 120, + } + expected := []siteconnections.Connection{ + { + TenantID: "10039663455a446d8ba2cbb058b0f578", + Name: "vpnconnection1", + AdminStateUp: true, + PSK: "secret", + Initiator: "bi-directional", + IPSecPolicyID: "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + MTU: 1500, + PeerEPGroupID: "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + IKEPolicyID: "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + VPNServiceID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + LocalEPGroupID: "3e1815dd-e212-43d0-8f13-b494fa553e68", + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + Status: "PENDING_CREATE", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + AuthMode: "psk", + PeerCIDRs: []string{}, + DPD: expectedDPD, + RouteMode: "static", + ID: "851f280f-5639-4ea3-81aa-e298525ab74b", + Description: "", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/vpn/ipsec-site-connections/5c561d9d-eaea-45f6-ae3e-08d1a7080828", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` 
+ { + "ipsec_site_connection": { + "psk": "updatedsecret", + "initiator": "response-only", + "name": "updatedconnection", + "description": "updateddescription" + } + } + `) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + + { + "ipsec_site_connection": { + "status": "ACTIVE", + "psk": "updatedsecret", + "initiator": "response-only", + "name": "updatedconnection", + "admin_state_up": true, + "project_id": "10039663455a446d8ba2cbb058b0f578", + "tenant_id": "10039663455a446d8ba2cbb058b0f578", + "auth_mode": "psk", + "peer_cidrs": [], + "mtu": 1500, + "peer_ep_group_id": "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + "ikepolicy_id": "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + "vpnservice_id": "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + "dpd": { + "action": "hold", + "interval": 30, + "timeout": 120 + }, + "route_mode": "static", + "ipsecpolicy_id": "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + "local_ep_group_id": "3e1815dd-e212-43d0-8f13-b494fa553e68", + "peer_address": "172.24.4.233", + "peer_id": "172.24.4.233", + "id": "851f280f-5639-4ea3-81aa-e298525ab74b", + "description": "updateddescription" + } +} +} +`) + }) + updatedName := "updatedconnection" + updatedDescription := "updateddescription" + options := siteconnections.UpdateOpts{ + Name: &updatedName, + Description: &updatedDescription, + Initiator: siteconnections.InitiatorResponseOnly, + PSK: "updatedsecret", + } + + actual, err := siteconnections.Update(fake.ServiceClient(), "5c561d9d-eaea-45f6-ae3e-08d1a7080828", options).Extract() + th.AssertNoErr(t, err) + + expectedDPD := siteconnections.DPD{ + Action: "hold", + Interval: 30, + Timeout: 120, + } + + expected := siteconnections.Connection{ + TenantID: "10039663455a446d8ba2cbb058b0f578", + Name: "updatedconnection", + AdminStateUp: true, + PSK: "updatedsecret", + Initiator: "response-only", + IPSecPolicyID: "e6e23d0c-9519-4d52-8ea4-5b1f96d857b1", + MTU: 1500, + PeerEPGroupID: "9ad5a7e0-6dac-41b4-b20d-a7b8645fddf1", + IKEPolicyID: "9b00d6b0-6c93-4ca5-9747-b8ade7bb514f", + VPNServiceID: "5c561d9d-eaea-45f6-ae3e-08d1a7080828", + LocalEPGroupID: "3e1815dd-e212-43d0-8f13-b494fa553e68", + PeerAddress: "172.24.4.233", + PeerID: "172.24.4.233", + Status: "ACTIVE", + ProjectID: "10039663455a446d8ba2cbb058b0f578", + AuthMode: "psk", + PeerCIDRs: []string{}, + DPD: expectedDPD, + RouteMode: "static", + ID: "851f280f-5639-4ea3-81aa-e298525ab74b", + Description: "updateddescription", + } + th.AssertDeepEquals(t, expected, *actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/urls.go new file mode 100644 index 000000000000..5c8ee9a364a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/vpnaas/siteconnections/urls.go @@ -0,0 +1,16 @@ +package siteconnections + +import "github.com/gophercloud/gophercloud" + +const ( + rootPath = "vpn" + resourcePath = "ipsec-site-connections" +) + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL(rootPath, resourcePath) +} + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL(rootPath, resourcePath, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go new file mode 100644 index 
000000000000..e768b71f8203 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/doc.go @@ -0,0 +1,65 @@ +/* +Package networks contains functionality for working with Neutron network +resources. A network is an isolated virtual layer-2 broadcast domain that is +typically reserved for the tenant who created it (unless you configure the +network to be shared). Tenants can create multiple networks until the +thresholds per-tenant quota is reached. + +In the v2.0 Networking API, the network is the main entity. Ports and subnets +are always associated with a network. + +Example to List Networks + + listOpts := networks.ListOpts{ + TenantID: "a99e9b4e620e4db09a2dfb6e42a01e66", + } + + allPages, err := networks.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allNetworks, err := networks.ExtractNetworks(allPages) + if err != nil { + panic(err) + } + + for _, network := range allNetworks { + fmt.Printf("%+v", network) + } + +Example to Create a Network + + iTrue := true + createOpts := networks.CreateOpts{ + Name: "network_1", + AdminStateUp: &iTrue, + } + + network, err := networks.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Network + + networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" + + updateOpts := networks.UpdateOpts{ + Name: "new_name", + } + + network, err := networks.Update(networkClient, networkID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Network + + networkID := "484cda0e-106f-4f4b-bb3f-d413710bbe78" + err := networks.Delete(networkClient, networkID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package networks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go new file mode 100644 index 000000000000..1fbf62092ca1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/requests.go @@ -0,0 +1,173 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToNetworkListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the network attributes you want to see returned. SortKey allows you to sort +// by a particular network attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + Shared *bool `q:"shared"` + ID string `q:"id"` + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// networks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. 
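In addition to the AllPages pattern shown in the package documentation above, the Pager returned by List can be walked page by page, which is how this package's own tests consume it. A short sketch, again assuming networkClient is an authenticated Neutron v2 *gophercloud.ServiceClient:

    err := networks.List(networkClient, networks.ListOpts{Status: "ACTIVE"}).EachPage(
        func(page pagination.Page) (bool, error) {
            nets, err := networks.ExtractNetworks(page)
            if err != nil {
                return false, err
            }
            for _, n := range nets {
                fmt.Printf("%s %s\n", n.ID, n.Name)
            }
            // Return true to continue on to the next page.
            return true, nil
        })
    if err != nil {
        panic(err)
    }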
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToNetworkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return NetworkPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific network based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToNetworkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents options used to create a network. +type CreateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + AvailabilityZoneHints []string `json:"availability_zone_hints,omitempty"` +} + +// ToNetworkCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToNetworkCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "network") +} + +// Create accepts a CreateOpts struct and creates a new network using the values +// provided. This operation does not actually require a request body, i.e. the +// CreateOpts struct argument can be empty. +// +// The tenant ID that is contained in the URI is the tenant that creates the +// network. An admin user, however, has the option of specifying another tenant +// ID in the CreateOpts struct. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToNetworkCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToNetworkUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents options used to update a network. +type UpdateOpts struct { + AdminStateUp *bool `json:"admin_state_up,omitempty"` + Name string `json:"name,omitempty"` + Shared *bool `json:"shared,omitempty"` +} + +// ToNetworkUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToNetworkUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "network") +} + +// Update accepts a UpdateOpts struct and updates an existing network using the +// values provided. For more information, see the Create function. +func Update(c *gophercloud.ServiceClient, networkID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToNetworkUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, networkID), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the network associated with it. +func Delete(c *gophercloud.ServiceClient, networkID string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, networkID), nil) + return +} + +// IDFromName is a convenience function that returns a network's ID, given +// its name. 
+func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractNetworks(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "network"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "network"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go new file mode 100644 index 000000000000..62f4b3c3a50b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/results.go @@ -0,0 +1,118 @@ +package networks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a network resource. +func (r commonResult) Extract() (*Network, error) { + var s Network + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "network") +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Network. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Network. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Network. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// Network represents, well, a network. +type Network struct { + // UUID for the network + ID string `json:"id"` + + // Human-readable name for the network. Might not be unique. + Name string `json:"name"` + + // The administrative state of network. If false (down), the network does not + // forward packets. + AdminStateUp bool `json:"admin_state_up"` + + // Indicates whether network is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional + // values. + Status string `json:"status"` + + // Subnets associated with this network. + Subnets []string `json:"subnets"` + + // TenantID is the project owner of the network. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the network. + ProjectID string `json:"project_id"` + + // Specifies whether the network resource can be accessed by any tenant. + Shared bool `json:"shared"` + + // Availability zone hints groups network nodes that run services like DHCP, L3, FW, and others. + // Used to make network resources highly available. + AvailabilityZoneHints []string `json:"availability_zone_hints"` +} + +// NetworkPage is the page returned by a pager when traversing over a +// collection of networks. 
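The IDFromName helper defined just above wraps this List/Extract flow for the common name-to-ID lookup. An illustrative call, under the same networkClient assumption:

    networkID, err := networks.IDFromName(networkClient, "public")
    if err != nil {
        // ErrResourceNotFound and ErrMultipleResourcesFound distinguish a
        // missing network from an ambiguous name.
        panic(err)
    }
    fmt.Println("network ID:", networkID)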
+type NetworkPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of networks has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r NetworkPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"networks_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a NetworkPage struct is empty. +func (r NetworkPage) IsEmpty() (bool, error) { + is, err := ExtractNetworks(r) + return len(is) == 0, err +} + +// ExtractNetworks accepts a Page struct, specifically a NetworkPage struct, +// and extracts the elements into a slice of Network structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractNetworks(r pagination.Page) ([]Network, error) { + var s []Network + err := ExtractNetworksInto(r, &s) + return s, err +} + +func ExtractNetworksInto(r pagination.Page, v interface{}) error { + return r.(NetworkPage).Result.ExtractIntoSlicePtr(v, "networks") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/doc.go new file mode 100644 index 000000000000..fc8511de47cd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/doc.go @@ -0,0 +1,2 @@ +// networks unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/fixtures.go new file mode 100644 index 000000000000..9632d448a5f8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/fixtures.go @@ -0,0 +1,195 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/networks" +) + +const ListResponse = ` +{ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "public", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "provider:segmentation_id": 9876543210, + "provider:physical_network": null, + "provider:network_type": "local", + "router:external": true, + "port_security_enabled": true + }, + { + "status": "ACTIVE", + "subnets": [ + "08eae331-0402-425a-923c-34f7cfe39c1b" + ], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": false, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "provider:segmentation_id": 1234567890, + "provider:physical_network": null, + "provider:network_type": "local", + "router:external": false, + "port_security_enabled": false + } + ] +}` + +const GetResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": [ + "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + ], + "name": "public", + "admin_state_up": true, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "provider:segmentation_id": 9876543210, + "provider:physical_network": null, + "provider:network_type": "local", + "router:external": true, + "port_security_enabled": true + } +}` + +const CreateRequest = ` +{ + "network": { + "name": "private", + 
"admin_state_up": true + } +}` + +const CreateResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": ["08eae331-0402-425a-923c-34f7cfe39c1b"], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": false, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "provider:segmentation_id": 9876543210, + "provider:physical_network": null, + "provider:network_type": "local" + } +}` + +const CreatePortSecurityRequest = ` +{ + "network": { + "name": "private", + "admin_state_up": true, + "port_security_enabled": false + } +}` + +const CreatePortSecurityResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": ["08eae331-0402-425a-923c-34f7cfe39c1b"], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": false, + "id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "provider:segmentation_id": 9876543210, + "provider:physical_network": null, + "provider:network_type": "local", + "port_security_enabled": false + } +}` + +const CreateOptionalFieldsRequest = ` +{ + "network": { + "name": "public", + "admin_state_up": true, + "shared": true, + "tenant_id": "12345", + "availability_zone_hints": ["zone1", "zone2"] + } +}` + +const UpdateRequest = ` +{ + "network": { + "name": "new_network_name", + "admin_state_up": false, + "shared": true + } +}` + +const UpdateResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": [], + "name": "new_network_name", + "admin_state_up": false, + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "shared": true, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c", + "provider:segmentation_id": 1234567890, + "provider:physical_network": null, + "provider:network_type": "local" + } +}` + +const UpdatePortSecurityRequest = ` +{ + "network": { + "port_security_enabled": false + } +}` + +const UpdatePortSecurityResponse = ` +{ + "network": { + "status": "ACTIVE", + "subnets": ["08eae331-0402-425a-923c-34f7cfe39c1b"], + "name": "private", + "admin_state_up": true, + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "shared": false, + "id": "4e8e5957-649f-477b-9e5b-f1f75b21c03c", + "provider:segmentation_id": 9876543210, + "provider:physical_network": null, + "provider:network_type": "local", + "port_security_enabled": false + } +}` + +var Network1 = networks.Network{ + Status: "ACTIVE", + Subnets: []string{"54d6f61d-db07-451c-9ab3-b9609b6b6f0b"}, + Name: "public", + AdminStateUp: true, + TenantID: "4fd44f30292945e481c7b8a0c8908869", + Shared: true, + ID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", +} + +var Network2 = networks.Network{ + Status: "ACTIVE", + Subnets: []string{"08eae331-0402-425a-923c-34f7cfe39c1b"}, + Name: "private", + AdminStateUp: true, + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + Shared: false, + ID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", +} + +var ExpectedNetworkSlice = []networks.Network{Network1, Network2} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/requests_test.go new file mode 100644 index 000000000000..adf88d59ba31 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/testing/requests_test.go @@ -0,0 +1,297 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" + 
"github.com/gophercloud/gophercloud/openstack/networking/v2/networks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) + + client := fake.ServiceClient() + count := 0 + + networks.List(client, networks.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := networks.ExtractNetworks(page) + if err != nil { + t.Errorf("Failed to extract networks: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedNetworkSlice, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestListWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) + + client := fake.ServiceClient() + + type networkWithExt struct { + networks.Network + portsecurity.PortSecurityExt + } + + var allNetworks []networkWithExt + + allPages, err := networks.List(client, networks.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + err = networks.ExtractNetworksInto(allPages, &allNetworks) + th.AssertNoErr(t, err) + + th.AssertEquals(t, allNetworks[0].Status, "ACTIVE") + th.AssertEquals(t, allNetworks[0].PortSecurityEnabled, true) + th.AssertEquals(t, allNetworks[0].Subnets[0], "54d6f61d-db07-451c-9ab3-b9609b6b6f0b") + th.AssertEquals(t, allNetworks[1].Subnets[0], "08eae331-0402-425a-923c-34f7cfe39c1b") +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse) + }) + + n, err := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &Network1, n) +} + +func TestGetWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/d32019d3-bc6e-4319-9c1d-6722fc136a22", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse) + }) + + var networkWithExtensions struct { + networks.Network + portsecurity.PortSecurityExt + } + + err := networks.Get(fake.ServiceClient(), "d32019d3-bc6e-4319-9c1d-6722fc136a22").ExtractInto(&networkWithExtensions) + th.AssertNoErr(t, err) + + th.AssertEquals(t, networkWithExtensions.Status, "ACTIVE") + th.AssertEquals(t, networkWithExtensions.PortSecurityEnabled, true) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + 
th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateResponse) + }) + + iTrue := true + options := networks.CreateOpts{Name: "private", AdminStateUp: &iTrue} + n, err := networks.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertDeepEquals(t, &Network2, n) +} + +func TestCreateWithOptionalFields(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateOptionalFieldsRequest) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{}`) + }) + + iTrue := true + options := networks.CreateOpts{ + Name: "public", + AdminStateUp: &iTrue, + Shared: &iTrue, + TenantID: "12345", + AvailabilityZoneHints: []string{"zone1", "zone2"}, + } + _, err := networks.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdateResponse) + }) + + iTrue, iFalse := true, false + options := networks.UpdateOpts{Name: "new_network_name", AdminStateUp: &iFalse, Shared: &iTrue} + n, err := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "new_network_name") + th.AssertEquals(t, n.AdminStateUp, false) + th.AssertEquals(t, n.Shared, true) + th.AssertEquals(t, n.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := networks.Delete(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertNoErr(t, res.Err) +} + +func TestCreatePortSecurity(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreatePortSecurityRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreatePortSecurityResponse) + }) + + var networkWithExtensions struct { + networks.Network + portsecurity.PortSecurityExt + } + + 
iTrue := true + iFalse := false + networkCreateOpts := networks.CreateOpts{Name: "private", AdminStateUp: &iTrue} + createOpts := portsecurity.NetworkCreateOptsExt{ + CreateOptsBuilder: networkCreateOpts, + PortSecurityEnabled: &iFalse, + } + + err := networks.Create(fake.ServiceClient(), createOpts).ExtractInto(&networkWithExtensions) + th.AssertNoErr(t, err) + + th.AssertEquals(t, networkWithExtensions.Status, "ACTIVE") + th.AssertEquals(t, networkWithExtensions.PortSecurityEnabled, false) +} + +func TestUpdatePortSecurity(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/networks/4e8e5957-649f-477b-9e5b-f1f75b21c03c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdatePortSecurityRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdatePortSecurityResponse) + }) + + var networkWithExtensions struct { + networks.Network + portsecurity.PortSecurityExt + } + + iFalse := false + networkUpdateOpts := networks.UpdateOpts{} + updateOpts := portsecurity.NetworkUpdateOptsExt{ + UpdateOptsBuilder: networkUpdateOpts, + PortSecurityEnabled: &iFalse, + } + + err := networks.Update(fake.ServiceClient(), "4e8e5957-649f-477b-9e5b-f1f75b21c03c", updateOpts).ExtractInto(&networkWithExtensions) + th.AssertNoErr(t, err) + + th.AssertEquals(t, networkWithExtensions.Name, "private") + th.AssertEquals(t, networkWithExtensions.AdminStateUp, true) + th.AssertEquals(t, networkWithExtensions.Shared, false) + th.AssertEquals(t, networkWithExtensions.ID, "4e8e5957-649f-477b-9e5b-f1f75b21c03c") + th.AssertEquals(t, networkWithExtensions.PortSecurityEnabled, false) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go new file mode 100644 index 000000000000..4a8fb1dc7d3c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/networks/urls.go @@ -0,0 +1,31 @@ +package networks + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("networks", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("networks") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go new file mode 100644 index 000000000000..cfb1774fb4b1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/doc.go @@ -0,0 +1,73 @@ +/* +Package ports contains functionality for working with Neutron port resources. + +A port represents a virtual switch port on a logical network switch. Virtual +instances attach their interfaces into ports. 
The logical port also defines +the MAC address and the IP address(es) to be assigned to the interfaces +plugged into them. When IP addresses are associated to a port, this also +implies the port is associated with a subnet, as the IP address was taken +from the allocation pool for a specific subnet. + +Example to List Ports + + listOpts := ports.ListOpts{ + DeviceID: "b0b89efe-82f8-461d-958b-adbf80f50c7d", + } + + allPages, err := ports.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allPorts, err := ports.ExtractPorts(allPages) + if err != nil { + panic(err) + } + + for _, port := range allPorts { + fmt.Printf("%+v\n", port) + } + +Example to Create a Port + + createOtps := ports.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: &[]string{"foo"}, + AllowedAddressPairs: []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + + port, err := ports.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Port + + portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8" + + updateOpts := ports.UpdateOpts{ + Name: "new_name", + SecurityGroups: &[]string{}, + } + + port, err := ports.Update(networkClient, portID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Port + + portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8" + err := ports.Delete(networkClient, portID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package ports diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go new file mode 100644 index 000000000000..2a5902d75df9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/requests.go @@ -0,0 +1,184 @@ +package ports + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToPortListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the port attributes you want to see returned. SortKey allows you to sort +// by a particular port attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + AdminStateUp *bool `q:"admin_state_up"` + NetworkID string `q:"network_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + DeviceOwner string `q:"device_owner"` + MACAddress string `q:"mac_address"` + ID string `q:"id"` + DeviceID string `q:"device_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToPortListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToPortListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// ports. 
It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those ports that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToPortListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return PortPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific port based on its unique ID. +func Get(c *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = c.Get(getURL(c, id), &r.Body, nil) + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToPortCreateMap() (map[string]interface{}, error) +} + +// CreateOpts represents the attributes used when creating a new port. +type CreateOpts struct { + NetworkID string `json:"network_id" required:"true"` + Name string `json:"name,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + MACAddress string `json:"mac_address,omitempty"` + FixedIPs interface{} `json:"fixed_ips,omitempty"` + DeviceID string `json:"device_id,omitempty"` + DeviceOwner string `json:"device_owner,omitempty"` + TenantID string `json:"tenant_id,omitempty"` + ProjectID string `json:"project_id,omitempty"` + SecurityGroups *[]string `json:"security_groups,omitempty"` + AllowedAddressPairs []AddressPair `json:"allowed_address_pairs,omitempty"` +} + +// ToPortCreateMap builds a request body from CreateOpts. +func (opts CreateOpts) ToPortCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "port") +} + +// Create accepts a CreateOpts struct and creates a new network using the values +// provided. You must remember to provide a NetworkID value. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToPortCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToPortUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts represents the attributes used when updating an existing port. +type UpdateOpts struct { + Name string `json:"name,omitempty"` + AdminStateUp *bool `json:"admin_state_up,omitempty"` + FixedIPs interface{} `json:"fixed_ips,omitempty"` + DeviceID string `json:"device_id,omitempty"` + DeviceOwner string `json:"device_owner,omitempty"` + SecurityGroups *[]string `json:"security_groups,omitempty"` + AllowedAddressPairs *[]AddressPair `json:"allowed_address_pairs,omitempty"` +} + +// ToPortUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToPortUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "port") +} + +// Update accepts a UpdateOpts struct and updates an existing port using the +// values provided. 
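The ports package documentation above covers List, Create, Update, and Delete; retrieving a single port is the remaining common call. A sketch using the Get function defined above, with the same networkClient assumption and the port ID borrowed from the package-doc examples:

    portID := "c34bae2b-7641-49b6-bf6d-d8e473620ed8"

    port, err := ports.Get(networkClient, portID).Extract()
    if err != nil {
        panic(err)
    }
    fmt.Printf("port %s is %s on network %s\n", port.ID, port.Status, port.NetworkID)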
+func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToPortUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the port associated with it. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a port's ID, +// given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractPorts(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "port"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "port"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go new file mode 100644 index 000000000000..66937fd9899a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/results.go @@ -0,0 +1,143 @@ +package ports + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a port resource. +func (r commonResult) Extract() (*Port, error) { + var s Port + err := r.ExtractInto(&s) + return &s, err +} + +func (r commonResult) ExtractInto(v interface{}) error { + return r.Result.ExtractIntoStructPtr(v, "port") +} + +// CreateResult represents the result of a create operation. Call its Extract +// method to interpret it as a Port. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Port. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Port. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// IP is a sub-struct that represents an individual IP. +type IP struct { + SubnetID string `json:"subnet_id"` + IPAddress string `json:"ip_address,omitempty"` +} + +// AddressPair contains the IP Address and the MAC address. +type AddressPair struct { + IPAddress string `json:"ip_address,omitempty"` + MACAddress string `json:"mac_address,omitempty"` +} + +// Port represents a Neutron port. See package documentation for a top-level +// description of what this is. +type Port struct { + // UUID for the port. + ID string `json:"id"` + + // Network that this port is associated with. + NetworkID string `json:"network_id"` + + // Human-readable name for the port. Might not be unique. + Name string `json:"name"` + + // Administrative state of port. 
If false (down), port does not forward + // packets. + AdminStateUp bool `json:"admin_state_up"` + + // Indicates whether network is currently operational. Possible values include + // `ACTIVE', `DOWN', `BUILD', or `ERROR'. Plug-ins might define additional + // values. + Status string `json:"status"` + + // Mac address to use on this port. + MACAddress string `json:"mac_address"` + + // Specifies IP addresses for the port thus associating the port itself with + // the subnets where the IP addresses are picked from + FixedIPs []IP `json:"fixed_ips"` + + // TenantID is the project owner of the port. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the port. + ProjectID string `json:"project_id"` + + // Identifies the entity (e.g.: dhcp agent) using this port. + DeviceOwner string `json:"device_owner"` + + // Specifies the IDs of any security groups associated with a port. + SecurityGroups []string `json:"security_groups"` + + // Identifies the device (e.g., virtual server) using this port. + DeviceID string `json:"device_id"` + + // Identifies the list of IP addresses the port will recognize/accept + AllowedAddressPairs []AddressPair `json:"allowed_address_pairs"` +} + +// PortPage is the page returned by a pager when traversing over a collection +// of network ports. +type PortPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of ports has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r PortPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"ports_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a PortPage struct is empty. +func (r PortPage) IsEmpty() (bool, error) { + is, err := ExtractPorts(r) + return len(is) == 0, err +} + +// ExtractPorts accepts a Page struct, specifically a PortPage struct, +// and extracts the elements into a slice of Port structs. In other words, +// a generic collection is mapped into a relevant slice. 
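Extension attributes on ports can be decoded the same way the networks tests in this diff decode them for networks: embed the extension struct alongside Port and use ExtractPortsInto (defined just below) instead of ExtractPorts. A sketch, assuming the portsecurity extension package applies to ports as it does to networks (the ports test fixtures below do carry a port_security_enabled attribute) and that networkClient is as above:

    type portWithExt struct {
        ports.Port
        portsecurity.PortSecurityExt
    }

    var allPorts []portWithExt

    allPages, err := ports.List(networkClient, ports.ListOpts{}).AllPages()
    if err != nil {
        panic(err)
    }

    if err := ports.ExtractPortsInto(allPages, &allPorts); err != nil {
        panic(err)
    }

    for _, p := range allPorts {
        fmt.Println(p.ID, p.PortSecurityEnabled)
    }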
+func ExtractPorts(r pagination.Page) ([]Port, error) { + var s []Port + err := ExtractPortsInto(r, &s) + return s, err +} + +func ExtractPortsInto(r pagination.Page, v interface{}) error { + return r.(PortPage).Result.ExtractIntoSlicePtr(v, "ports") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/doc.go new file mode 100644 index 000000000000..bf82f4eb0d5d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/doc.go @@ -0,0 +1,2 @@ +// ports unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/fixtures.go new file mode 100644 index 000000000000..cfce9b091295 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/fixtures.go @@ -0,0 +1,710 @@ +package testing + +const ListResponse = ` +{ + "ports": [ + { + "status": "ACTIVE", + "binding:host_id": "devstack", + "name": "", + "admin_state_up": true, + "network_id": "70c1db1f-b701-45bd-96e0-a313ee3430b3", + "tenant_id": "", + "device_owner": "network:router_gateway", + "mac_address": "fa:16:3e:58:42:ed", + "binding:vnic_type": "normal", + "fixed_ips": [ + { + "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062", + "ip_address": "172.24.4.2" + } + ], + "id": "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + "security_groups": [], + "device_id": "9ae135f4-b6e0-4dad-9e91-3c223e385824", + "port_security_enabled": false + } + ] +} +` + +const GetResponse = ` +{ + "port": { + "status": "ACTIVE", + "binding:host_id": "devstack", + "name": "", + "allowed_address_pairs": [], + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "7e02058126cc4950b75f9970368ba177", + "extra_dhcp_opts": [], + "binding:vif_details": { + "port_filter": true, + "ovs_hybrid_plug": true + }, + "binding:vif_type": "ovs", + "device_owner": "network:router_interface", + "port_security_enabled": false, + "mac_address": "fa:16:3e:23:fd:d7", + "binding:profile": {}, + "binding:vnic_type": "normal", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.1" + } + ], + "id": "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", + "security_groups": [], + "device_id": "5e3898d7-11be-483e-9732-b2f5eccd2b2e" + } +} +` + +const CreateRequest = ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "security_groups": ["foo"], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ] + } +} +` + +const CreateResponse = ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "device_id": "" 
+ } +} +` + +const CreateOmitSecurityGroupsRequest = ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ] + } +} +` + +const CreateWithNoSecurityGroupsRequest = ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "security_groups": [], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ] + } +} +` + +const CreateWithNoSecurityGroupsResponse = ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "device_id": "" + } +} +` + +const CreateOmitSecurityGroupsResponse = ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "device_id": "" + } +} +` + +const CreatePortSecurityRequest = ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "private-port", + "admin_state_up": true, + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "security_groups": ["foo"], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "port_security_enabled": false + } +} +` + +const CreatePortSecurityResponse = ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "device_id": "", + "port_security_enabled": false + } +} +` + +const UpdateRequest = ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "security_groups": [ + 
"f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ] + } +} +` + +const UpdateResponse = ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} +` + +const UpdateOmitSecurityGroupsRequest = ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ] + } +} +` + +const UpdateOmitSecurityGroupsResponse = ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} +` + +const UpdatePortSecurityRequest = ` +{ + "port": { + "port_security_enabled": false + } +} +` + +const UpdatePortSecurityResponse = ` +{ + "port": { + "status": "DOWN", + "name": "private-port", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "device_id": "", + "port_security_enabled": false + } +} +` + +const RemoveSecurityGroupRequest = ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "security_groups": [] + } +} +` + +const RemoveSecurityGroupResponse = ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "device_id": "" + } +} +` + +const RemoveAllowedAddressPairsRequest = ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": 
"a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [], + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ] + } +} +` + +const RemoveAllowedAddressPairsResponse = ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} +` + +const DontUpdateAllowedAddressPairsRequest = ` +{ + "port": { + "name": "new_port_name", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ] + } +} +` + +const DontUpdateAllowedAddressPairsResponse = ` +{ + "port": { + "status": "DOWN", + "name": "new_port_name", + "admin_state_up": true, + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "allowed_address_pairs": [ + { + "ip_address": "10.0.0.4", + "mac_address": "fa:16:3e:c9:cb:f0" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "security_groups": [ + "f0ac4394-7e4a-4409-9701-ba8be283dbc3" + ], + "device_id": "" + } +} +` + +// GetWithExtraDHCPOptsResponse represents a raw port response with extra +// DHCP options. +const GetWithExtraDHCPOptsResponse = ` +{ + "port": { + "status": "ACTIVE", + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "extra_dhcp_opts": [ + { + "opt_name": "option1", + "opt_value": "value1", + "ip_version": 4 + }, + { + "opt_name": "option2", + "opt_value": "value2", + "ip_version": 4 + } + ], + "admin_state_up": true, + "name": "port-with-extra-dhcp-opts", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.4" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "device_id": "" + } +} +` + +// CreateWithExtraDHCPOptsRequest represents a raw port creation request +// with extra DHCP options. +const CreateWithExtraDHCPOptsRequest = ` +{ + "port": { + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "name": "port-with-extra-dhcp-opts", + "admin_state_up": true, + "fixed_ips": [ + { + "ip_address": "10.0.0.2", + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2" + } + ], + "extra_dhcp_opts": [ + { + "opt_name": "option1", + "opt_value": "value1" + } + ] + } +} +` + +// CreateWithExtraDHCPOptsResponse represents a raw port creation response +// with extra DHCP options. 
+const CreateWithExtraDHCPOptsResponse = ` +{ + "port": { + "status": "DOWN", + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "extra_dhcp_opts": [ + { + "opt_name": "option1", + "opt_value": "value1", + "ip_version": 4 + } + ], + "admin_state_up": true, + "name": "port-with-extra-dhcp-opts", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.2" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "device_id": "" + } +} +` + +// UpdateWithExtraDHCPOptsRequest represents a raw port update request with +// extra DHCP options. +const UpdateWithExtraDHCPOptsRequest = ` +{ + "port": { + "name": "updated-port-with-dhcp-opts", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "extra_dhcp_opts": [ + { + "opt_name": "option1", + "opt_value": null + }, + { + "opt_name": "option2", + "opt_value": "value2" + } + ] + } +} +` + +// UpdateWithExtraDHCPOptsResponse represents a raw port update response with +// extra DHCP options. +const UpdateWithExtraDHCPOptsResponse = ` +{ + "port": { + "status": "DOWN", + "network_id": "a87cc70a-3e15-4acf-8205-9b711a3531b7", + "tenant_id": "d6700c0c9ffa4f1cb322cd4a1f3906fa", + "extra_dhcp_opts": [ + { + "opt_name": "option2", + "opt_value": "value2", + "ip_version": 4 + } + ], + "admin_state_up": true, + "name": "updated-port-with-dhcp-opts", + "device_owner": "", + "mac_address": "fa:16:3e:c9:cb:f0", + "fixed_ips": [ + { + "subnet_id": "a0304c3a-4f08-4c43-88af-d796509c97d2", + "ip_address": "10.0.0.3" + } + ], + "id": "65c0ee9f-d634-4522-8954-51021b570b0d", + "device_id": "" + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/requests_test.go new file mode 100644 index 000000000000..af91cf99d3ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/testing/requests_test.go @@ -0,0 +1,779 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/extradhcpopts" + "github.com/gophercloud/gophercloud/openstack/networking/v2/extensions/portsecurity" + "github.com/gophercloud/gophercloud/openstack/networking/v2/ports" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) + + count := 0 + + ports.List(fake.ServiceClient(), ports.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := ports.ExtractPorts(page) + if err != nil { + t.Errorf("Failed to extract subnets: %v", err) + return false, nil + } + + expected := []ports.Port{ + { + Status: "ACTIVE", + Name: "", + AdminStateUp: true, + NetworkID: "70c1db1f-b701-45bd-96e0-a313ee3430b3", + TenantID: "", + DeviceOwner: "network:router_gateway", + MACAddress: "fa:16:3e:58:42:ed", + FixedIPs: []ports.IP{ + { + 
SubnetID: "008ba151-0b8c-4a67-98b5-0d2b87666062", + IPAddress: "172.24.4.2", + }, + }, + ID: "d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b", + SecurityGroups: []string{}, + DeviceID: "9ae135f4-b6e0-4dad-9e91-3c223e385824", + }, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestListWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ListResponse) + }) + + type portWithExt struct { + ports.Port + portsecurity.PortSecurityExt + } + + var allPorts []portWithExt + + allPages, err := ports.List(fake.ServiceClient(), ports.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + + err = ports.ExtractPortsInto(allPages, &allPorts) + + th.AssertEquals(t, allPorts[0].Status, "ACTIVE") + th.AssertEquals(t, allPorts[0].PortSecurityEnabled, false) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse) + }) + + n, err := ports.Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertEquals(t, n.Name, "") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "7e02058126cc4950b75f9970368ba177") + th.AssertEquals(t, n.DeviceOwner, "network:router_interface") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:23:fd:d7") + th.AssertDeepEquals(t, n.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.1"}, + }) + th.AssertEquals(t, n.ID, "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2") + th.AssertDeepEquals(t, n.SecurityGroups, []string{}) + th.AssertEquals(t, n.Status, "ACTIVE") + th.AssertEquals(t, n.DeviceID, "5e3898d7-11be-483e-9732-b2f5eccd2b2e") +} + +func TestGetWithExtensions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetResponse) + }) + + var portWithExtensions struct { + ports.Port + portsecurity.PortSecurityExt + } + + err := ports.Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").ExtractInto(&portWithExtensions) + th.AssertNoErr(t, err) + + th.AssertEquals(t, portWithExtensions.Status, "ACTIVE") + th.AssertEquals(t, portWithExtensions.PortSecurityEnabled, false) +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, 
r, CreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateResponse) + }) + + asu := true + options := ports.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: &[]string{"foo"}, + AllowedAddressPairs: []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + n, err := ports.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "DOWN") + th.AssertEquals(t, n.Name, "private-port") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, n.DeviceOwner, "") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, n.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, n.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertDeepEquals(t, n.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) + th.AssertDeepEquals(t, n.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) +} + +func TestCreateOmitSecurityGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateOmitSecurityGroupsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateOmitSecurityGroupsResponse) + }) + + asu := true + options := ports.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + AllowedAddressPairs: []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + n, err := ports.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "DOWN") + th.AssertEquals(t, n.Name, "private-port") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, n.DeviceOwner, "") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, n.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, n.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertDeepEquals(t, n.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) + th.AssertDeepEquals(t, n.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) +} + +func TestCreateWithNoSecurityGroup(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, 
"Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateWithNoSecurityGroupsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateWithNoSecurityGroupsResponse) + }) + + asu := true + options := ports.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: &[]string{}, + AllowedAddressPairs: []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + n, err := ports.Create(fake.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Status, "DOWN") + th.AssertEquals(t, n.Name, "private-port") + th.AssertEquals(t, n.AdminStateUp, true) + th.AssertEquals(t, n.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, n.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, n.DeviceOwner, "") + th.AssertEquals(t, n.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, n.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, n.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertDeepEquals(t, n.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) +} + +func TestRequiredCreateOpts(t *testing.T) { + res := ports.Create(fake.ServiceClient(), ports.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestCreatePortSecurity(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreatePortSecurityRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreatePortSecurityResponse) + }) + + var portWithExt struct { + ports.Port + portsecurity.PortSecurityExt + } + + asu := true + iFalse := false + portCreateOpts := ports.CreateOpts{ + Name: "private-port", + AdminStateUp: &asu, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + SecurityGroups: &[]string{"foo"}, + AllowedAddressPairs: []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + createOpts := portsecurity.PortCreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + PortSecurityEnabled: &iFalse, + } + + err := ports.Create(fake.ServiceClient(), createOpts).ExtractInto(&portWithExt) + th.AssertNoErr(t, err) + + th.AssertEquals(t, portWithExt.Status, "DOWN") + th.AssertEquals(t, portWithExt.PortSecurityEnabled, false) +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdateRequest) + + w.Header().Add("Content-Type", 
"application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdateResponse) + }) + + options := ports.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: &[]string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}, + AllowedAddressPairs: &[]ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + + s, err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestUpdateOmitSecurityGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdateOmitSecurityGroupsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdateOmitSecurityGroupsResponse) + }) + + options := ports.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + AllowedAddressPairs: &[]ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + + s, err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestUpdatePortSecurity(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdatePortSecurityRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdatePortSecurityResponse) + }) + + var portWithExt struct { + ports.Port + portsecurity.PortSecurityExt + } + + iFalse := false + portUpdateOpts := ports.UpdateOpts{} + updateOpts := portsecurity.PortUpdateOptsExt{ + UpdateOptsBuilder: portUpdateOpts, + PortSecurityEnabled: &iFalse, + } + + err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&portWithExt) + th.AssertNoErr(t, err) + + th.AssertEquals(t, portWithExt.Status, "DOWN") + th.AssertEquals(t, portWithExt.Name, "private-port") + th.AssertEquals(t, 
portWithExt.PortSecurityEnabled, false) +} + +func TestRemoveSecurityGroups(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, RemoveSecurityGroupRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, RemoveSecurityGroupResponse) + }) + + options := ports.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: &[]string{}, + AllowedAddressPairs: &[]ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }, + } + + s, err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string(nil)) +} + +func TestRemoveAllowedAddressPairs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, RemoveAllowedAddressPairsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, RemoveAllowedAddressPairsResponse) + }) + + options := ports.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: &[]string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}, + AllowedAddressPairs: &[]ports.AddressPair{}, + } + + s, err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.AllowedAddressPairs, []ports.AddressPair(nil)) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestDontUpdateAllowedAddressPairs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, DontUpdateAllowedAddressPairsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, DontUpdateAllowedAddressPairsResponse) + }) + + options := ports.UpdateOpts{ + Name: "new_port_name", + FixedIPs: []ports.IP{ + 
{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + SecurityGroups: &[]string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}, + } + + s, err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "new_port_name") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertDeepEquals(t, s.AllowedAddressPairs, []ports.AddressPair{ + {IPAddress: "10.0.0.4", MACAddress: "fa:16:3e:c9:cb:f0"}, + }) + th.AssertDeepEquals(t, s.SecurityGroups, []string{"f0ac4394-7e4a-4409-9701-ba8be283dbc3"}) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := ports.Delete(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertNoErr(t, res.Err) +} + +func TestGetWithExtraDHCPOpts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, GetWithExtraDHCPOptsResponse) + }) + + var s struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt + } + + err := ports.Get(fake.ServiceClient(), "46d4bfb9-b26e-41f3-bd2e-e6dcc1ccedb2").ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, "ACTIVE") + th.AssertEquals(t, s.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, s.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, s.AdminStateUp, true) + th.AssertEquals(t, s.Name, "port-with-extra-dhcp-opts") + th.AssertEquals(t, s.DeviceOwner, "") + th.AssertEquals(t, s.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.4"}, + }) + th.AssertEquals(t, s.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertEquals(t, s.DeviceID, "") + + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].OptName, "option1") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].OptValue, "value1") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].IPVersion, 4) + th.AssertDeepEquals(t, s.ExtraDHCPOpts[1].OptName, "option2") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[1].OptValue, "value2") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[1].IPVersion, 4) +} + +func TestCreateWithExtraDHCPOpts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, CreateWithExtraDHCPOptsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, CreateWithExtraDHCPOptsResponse) + }) + + adminStateUp := true + portCreateOpts := ports.CreateOpts{ + Name: "port-with-extra-dhcp-opts", + AdminStateUp: &adminStateUp, + NetworkID: "a87cc70a-3e15-4acf-8205-9b711a3531b7", + FixedIPs: []ports.IP{ + 
{SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }, + } + + createOpts := extradhcpopts.CreateOptsExt{ + CreateOptsBuilder: portCreateOpts, + ExtraDHCPOpts: []extradhcpopts.CreateExtraDHCPOpt{ + { + OptName: "option1", + OptValue: "value1", + }, + }, + } + + var s struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt + } + + err := ports.Create(fake.ServiceClient(), createOpts).ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, "DOWN") + th.AssertEquals(t, s.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, s.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, s.AdminStateUp, true) + th.AssertEquals(t, s.Name, "port-with-extra-dhcp-opts") + th.AssertEquals(t, s.DeviceOwner, "") + th.AssertEquals(t, s.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.2"}, + }) + th.AssertEquals(t, s.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertEquals(t, s.DeviceID, "") + + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].OptName, "option1") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].OptValue, "value1") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].IPVersion, 4) +} + +func TestUpdateWithExtraDHCPOpts(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/ports/65c0ee9f-d634-4522-8954-51021b570b0d", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, UpdateWithExtraDHCPOptsRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, UpdateWithExtraDHCPOptsResponse) + }) + + portUpdateOpts := ports.UpdateOpts{ + Name: "updated-port-with-dhcp-opts", + FixedIPs: []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }, + } + + edoValue2 := "value2" + updateOpts := extradhcpopts.UpdateOptsExt{ + UpdateOptsBuilder: portUpdateOpts, + ExtraDHCPOpts: []extradhcpopts.UpdateExtraDHCPOpt{ + { + OptName: "option1", + }, + { + OptName: "option2", + OptValue: &edoValue2, + }, + }, + } + + var s struct { + ports.Port + extradhcpopts.ExtraDHCPOptsExt + } + + err := ports.Update(fake.ServiceClient(), "65c0ee9f-d634-4522-8954-51021b570b0d", updateOpts).ExtractInto(&s) + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Status, "DOWN") + th.AssertEquals(t, s.NetworkID, "a87cc70a-3e15-4acf-8205-9b711a3531b7") + th.AssertEquals(t, s.TenantID, "d6700c0c9ffa4f1cb322cd4a1f3906fa") + th.AssertEquals(t, s.AdminStateUp, true) + th.AssertEquals(t, s.Name, "updated-port-with-dhcp-opts") + th.AssertEquals(t, s.DeviceOwner, "") + th.AssertEquals(t, s.MACAddress, "fa:16:3e:c9:cb:f0") + th.AssertDeepEquals(t, s.FixedIPs, []ports.IP{ + {SubnetID: "a0304c3a-4f08-4c43-88af-d796509c97d2", IPAddress: "10.0.0.3"}, + }) + th.AssertEquals(t, s.ID, "65c0ee9f-d634-4522-8954-51021b570b0d") + th.AssertEquals(t, s.DeviceID, "") + + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].OptName, "option2") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].OptValue, "value2") + th.AssertDeepEquals(t, s.ExtraDHCPOpts[0].IPVersion, 4) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go new file mode 100644 index 
000000000000..600d6f2fd950 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/ports/urls.go @@ -0,0 +1,31 @@ +package ports + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("ports", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("ports") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go new file mode 100644 index 000000000000..d0ed8dff06a0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/doc.go @@ -0,0 +1,133 @@ +/* +Package subnets contains functionality for working with Neutron subnet +resources. A subnet represents an IP address block that can be used to +assign IP addresses to virtual instances. Each subnet must have a CIDR and +must be associated with a network. IPs can either be selected from the whole +subnet CIDR or from allocation pools specified by the user. + +A subnet can also have a gateway, a list of DNS name servers, and host routes. +This information is pushed to instances whose interfaces are associated with +the subnet. + +Example to List Subnets + + listOpts := subnets.ListOpts{ + IPVersion: 4, + } + + allPages, err := subnets.List(networkClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allSubnets, err := subnets.ExtractSubnets(allPages) + if err != nil { + panic(err) + } + + for _, subnet := range allSubnets { + fmt.Printf("%+v\n", subnet) + } + +Example to Create a Subnet With Specified Gateway + + var gatewayIP = "192.168.199.1" + createOpts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + IPVersion: 4, + CIDR: "192.168.199.0/24", + GatewayIP: &gatewayIP, + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }, + DNSNameservers: []string{"foo"}, + } + + subnet, err := subnets.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Create a Subnet With No Gateway + + var noGateway = "" + + createOpts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", + IPVersion: 4, + CIDR: "192.168.1.0/24", + GatewayIP: &noGateway, + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }, + DNSNameservers: []string{}, + } + + subnet, err := subnets.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Create a Subnet With a Default Gateway + + createOpts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", + IPVersion: 4, + CIDR: "192.168.1.0/24", + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }, + DNSNameservers: []string{}, + } + + subnet, err := subnets.Create(networkClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Subnet + + subnetID := 
"db77d064-e34f-4d06-b060-f21e28a61c23" + + updateOpts := subnets.UpdateOpts{ + Name: "new_name", + DNSNameservers: []string{"8.8.8.8}, + } + + subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Remove a Gateway From a Subnet + + var noGateway = "" + subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23" + + updateOpts := subnets.UpdateOpts{ + GatewayIP: &noGateway, + } + + subnet, err := subnets.Update(networkClient, subnetID, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Subnet + + subnetID := "db77d064-e34f-4d06-b060-f21e28a61c23" + err := subnets.Delete(networkClient, subnetID).ExtractErr() + if err != nil { + panic(err) + } +*/ +package subnets diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go new file mode 100644 index 000000000000..2597947b1835 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/requests.go @@ -0,0 +1,254 @@ +package subnets + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToSubnetListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the subnet attributes you want to see returned. SortKey allows you to sort +// by a particular subnet attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Name string `q:"name"` + EnableDHCP *bool `q:"enable_dhcp"` + NetworkID string `q:"network_id"` + TenantID string `q:"tenant_id"` + ProjectID string `q:"project_id"` + IPVersion int `q:"ip_version"` + GatewayIP string `q:"gateway_ip"` + CIDR string `q:"cidr"` + IPv6AddressMode string `q:"ipv6_address_mode"` + IPv6RAMode string `q:"ipv6_ra_mode"` + ID string `q:"id"` + SubnetPoolID string `q:"subnetpool_id"` + Limit int `q:"limit"` + Marker string `q:"marker"` + SortKey string `q:"sort_key"` + SortDir string `q:"sort_dir"` +} + +// ToSubnetListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSubnetListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns a Pager which allows you to iterate over a collection of +// subnets. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. +// +// Default policy settings return only those subnets that are owned by the tenant +// who submits the request, unless the request is submitted by a user with +// administrative rights. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToSubnetListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + return SubnetPage{pagination.LinkedPageBase{PageResult: r}} + }) +} + +// Get retrieves a specific subnet based on its unique ID. 
+func Get(c *gophercloud.ServiceClient, id string) (r GetResult) {
+	_, r.Err = c.Get(getURL(c, id), &r.Body, nil)
+	return
+}
+
+// CreateOptsBuilder allows extensions to add additional parameters to the
+// Create request.
+type CreateOptsBuilder interface {
+	ToSubnetCreateMap() (map[string]interface{}, error)
+}
+
+// CreateOpts represents the attributes used when creating a new subnet.
+type CreateOpts struct {
+	// NetworkID is the UUID of the network the subnet will be associated with.
+	NetworkID string `json:"network_id" required:"true"`
+
+	// CIDR is the address CIDR of the subnet.
+	CIDR string `json:"cidr,omitempty"`
+
+	// Name is a human-readable name of the subnet.
+	Name string `json:"name,omitempty"`
+
+	// The UUID of the project that owns the Subnet. Only administrative users
+	// can specify a project UUID other than their own.
+	TenantID string `json:"tenant_id,omitempty"`
+
+	// The UUID of the project that owns the Subnet. Only administrative users
+	// can specify a project UUID other than their own.
+	ProjectID string `json:"project_id,omitempty"`
+
+	// AllocationPools are IP Address pools that will be available for DHCP.
+	AllocationPools []AllocationPool `json:"allocation_pools,omitempty"`
+
+	// GatewayIP sets gateway information for the subnet. Setting to nil will
+	// cause a default gateway to automatically be created. Setting to an empty
+	// string will cause the subnet to be created with no gateway. Setting to
+	// an explicit address will set that address as the gateway.
+	GatewayIP *string `json:"gateway_ip,omitempty"`
+
+	// IPVersion is the IP version for the subnet.
+	IPVersion gophercloud.IPVersion `json:"ip_version,omitempty"`
+
+	// EnableDHCP will either enable or disable the DHCP service.
+	EnableDHCP *bool `json:"enable_dhcp,omitempty"`
+
+	// DNSNameservers are the nameservers to be set via DHCP.
+	DNSNameservers []string `json:"dns_nameservers,omitempty"`
+
+	// HostRoutes are any static host routes to be set via DHCP.
+	HostRoutes []HostRoute `json:"host_routes,omitempty"`
+
+	// The IPv6 address mode specifies mechanisms for assigning IPv6 IP addresses.
+	IPv6AddressMode string `json:"ipv6_address_mode,omitempty"`
+
+	// The IPv6 router advertisement specifies whether the networking service
+	// should transmit ICMPv6 packets.
+	IPv6RAMode string `json:"ipv6_ra_mode,omitempty"`
+
+	// SubnetPoolID is the ID of the subnet pool the subnet should be associated with.
+	SubnetPoolID string `json:"subnetpool_id,omitempty"`
+}
+
+// ToSubnetCreateMap builds a request body from CreateOpts.
+func (opts CreateOpts) ToSubnetCreateMap() (map[string]interface{}, error) {
+	b, err := gophercloud.BuildRequestBody(opts, "subnet")
+	if err != nil {
+		return nil, err
+	}
+
+	if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" {
+		m["gateway_ip"] = nil
+	}
+
+	return b, nil
+}
+
+// Create accepts a CreateOpts struct and creates a new subnet using the values
+// provided. You must remember to provide a valid NetworkID, CIDR and IP
+// version.
+func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) {
+	b, err := opts.ToSubnetCreateMap()
+	if err != nil {
+		r.Err = err
+		return
+	}
+	_, r.Err = c.Post(createURL(c), b, &r.Body, nil)
+	return
+}
+
+// UpdateOptsBuilder allows extensions to add additional parameters to the
+// Update request.
+type UpdateOptsBuilder interface {
+	ToSubnetUpdateMap() (map[string]interface{}, error)
+}
+
+// UpdateOpts represents the attributes used when updating an existing subnet.
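+//
+// A minimal illustrative sketch, assuming an authenticated *gophercloud.ServiceClient
+// named networkClient (not defined in this package); the subnet ID and field
+// values are only example values:
+//
+//	updateOpts := UpdateOpts{
+//		Name:           "new_subnet_name",
+//		DNSNameservers: []string{"8.8.8.8"},
+//	}
+//	subnet, err := Update(networkClient, "db77d064-e34f-4d06-b060-f21e28a61c23", updateOpts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}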
+type UpdateOpts struct { + // Name is a human-readable name of the subnet. + Name string `json:"name,omitempty"` + + // AllocationPools are IP Address pools that will be available for DHCP. + AllocationPools []AllocationPool `json:"allocation_pools,omitempty"` + + // GatewayIP sets gateway information for the subnet. Setting to nil will + // cause a default gateway to automatically be created. Setting to an empty + // string will cause the subnet to be created with no gateway. Setting to + // an explicit address will set that address as the gateway. + GatewayIP *string `json:"gateway_ip,omitempty"` + + // DNSNameservers are the nameservers to be set via DHCP. + DNSNameservers []string `json:"dns_nameservers,omitempty"` + + // HostRoutes are any static host routes to be set via DHCP. + HostRoutes *[]HostRoute `json:"host_routes,omitempty"` + + // EnableDHCP will either enable to disable the DHCP service. + EnableDHCP *bool `json:"enable_dhcp,omitempty"` +} + +// ToSubnetUpdateMap builds a request body from UpdateOpts. +func (opts UpdateOpts) ToSubnetUpdateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "subnet") + if err != nil { + return nil, err + } + + if m := b["subnet"].(map[string]interface{}); m["gateway_ip"] == "" { + m["gateway_ip"] = nil + } + + return b, nil +} + +// Update accepts a UpdateOpts struct and updates an existing subnet using the +// values provided. +func Update(c *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSubnetUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// Delete accepts a unique ID and deletes the subnet associated with it. +func Delete(c *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, id), nil) + return +} + +// IDFromName is a convenience function that returns a subnet's ID, +// given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + count := 0 + id := "" + + listOpts := ListOpts{ + Name: name, + } + + pages, err := List(client, listOpts).AllPages() + if err != nil { + return "", err + } + + all, err := ExtractSubnets(pages) + if err != nil { + return "", err + } + + for _, s := range all { + if s.Name == name { + count++ + id = s.ID + } + } + + switch count { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "subnet"} + case 1: + return id, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: count, ResourceType: "subnet"} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go new file mode 100644 index 000000000000..493e5c042e2a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/results.go @@ -0,0 +1,146 @@ +package subnets + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type commonResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts a subnet resource. +func (r commonResult) Extract() (*Subnet, error) { + var s struct { + Subnet *Subnet `json:"subnet"` + } + err := r.ExtractInto(&s) + return s.Subnet, err +} + +// CreateResult represents the result of a create operation. 
Call its Extract +// method to interpret it as a Subnet. +type CreateResult struct { + commonResult +} + +// GetResult represents the result of a get operation. Call its Extract +// method to interpret it as a Subnet. +type GetResult struct { + commonResult +} + +// UpdateResult represents the result of an update operation. Call its Extract +// method to interpret it as a Subnet. +type UpdateResult struct { + commonResult +} + +// DeleteResult represents the result of a delete operation. Call its +// ExtractErr method to determine if the request succeeded or failed. +type DeleteResult struct { + gophercloud.ErrResult +} + +// AllocationPool represents a sub-range of cidr available for dynamic +// allocation to ports, e.g. {Start: "10.0.0.2", End: "10.0.0.254"} +type AllocationPool struct { + Start string `json:"start"` + End string `json:"end"` +} + +// HostRoute represents a route that should be used by devices with IPs from +// a subnet (not including local subnet route). +type HostRoute struct { + DestinationCIDR string `json:"destination"` + NextHop string `json:"nexthop"` +} + +// Subnet represents a subnet. See package documentation for a top-level +// description of what this is. +type Subnet struct { + // UUID representing the subnet. + ID string `json:"id"` + + // UUID of the parent network. + NetworkID string `json:"network_id"` + + // Human-readable name for the subnet. Might not be unique. + Name string `json:"name"` + + // IP version, either `4' or `6'. + IPVersion int `json:"ip_version"` + + // CIDR representing IP range for this subnet, based on IP version. + CIDR string `json:"cidr"` + + // Default gateway used by devices in this subnet. + GatewayIP string `json:"gateway_ip"` + + // DNS name servers used by hosts in this subnet. + DNSNameservers []string `json:"dns_nameservers"` + + // Sub-ranges of CIDR available for dynamic allocation to ports. + // See AllocationPool. + AllocationPools []AllocationPool `json:"allocation_pools"` + + // Routes that should be used by devices with IPs from this subnet + // (not including local subnet route). + HostRoutes []HostRoute `json:"host_routes"` + + // Specifies whether DHCP is enabled for this subnet or not. + EnableDHCP bool `json:"enable_dhcp"` + + // TenantID is the project owner of the subnet. + TenantID string `json:"tenant_id"` + + // ProjectID is the project owner of the subnet. + ProjectID string `json:"project_id"` + + // The IPv6 address modes specifies mechanisms for assigning IPv6 IP addresses. + IPv6AddressMode string `json:"ipv6_address_mode"` + + // The IPv6 router advertisement specifies whether the networking service + // should transmit ICMPv6 packets. + IPv6RAMode string `json:"ipv6_ra_mode"` + + // SubnetPoolID is the id of the subnet pool associated with the subnet. + SubnetPoolID string `json:"subnetpool_id"` +} + +// SubnetPage is the page returned by a pager when traversing over a collection +// of subnets. +type SubnetPage struct { + pagination.LinkedPageBase +} + +// NextPageURL is invoked when a paginated collection of subnets has reached +// the end of a page and the pager seeks to traverse over a new one. In order +// to do this, it needs to construct the next page's URL. +func (r SubnetPage) NextPageURL() (string, error) { + var s struct { + Links []gophercloud.Link `json:"subnets_links"` + } + err := r.ExtractInto(&s) + if err != nil { + return "", err + } + return gophercloud.ExtractNextURL(s.Links) +} + +// IsEmpty checks whether a SubnetPage struct is empty. 
+func (r SubnetPage) IsEmpty() (bool, error) { + is, err := ExtractSubnets(r) + return len(is) == 0, err +} + +// ExtractSubnets accepts a Page struct, specifically a SubnetPage struct, +// and extracts the elements into a slice of Subnet structs. In other words, +// a generic collection is mapped into a relevant slice. +func ExtractSubnets(r pagination.Page) ([]Subnet, error) { + var s struct { + Subnets []Subnet `json:"subnets"` + } + err := (r.(SubnetPage)).ExtractInto(&s) + return s.Subnets, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/doc.go new file mode 100644 index 000000000000..e07714bae3e7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/doc.go @@ -0,0 +1,2 @@ +// subnets unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/fixtures.go new file mode 100644 index 000000000000..8cac911eb363 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/fixtures.go @@ -0,0 +1,556 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" +) + +const SubnetListResult = ` +{ + "subnets": [ + { + "name": "private-subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + }, + { + "name": "my_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.0.0.2", + "end": "192.255.255.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.0.0.1", + "cidr": "192.0.0.0/8", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b" + }, + { + "name": "my_gatewayless_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.168.1.2", + "end": "192.168.1.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": null, + "cidr": "192.168.1.0/24", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0c" + }, + { + "name": "my_subnet_with_subnetpool", + "enable_dhcp": false, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.11.12.2", + "end": "10.11.12.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": null, + "cidr": "10.11.12.0/24", + "id": "38186a51-f373-4bbc-838b-6eaa1aa13eac", + "subnetpool_id": "b80340c7-9960-4f67-a99c-02501656284b" + } + ] +} +` + +var Subnet1 = subnets.Subnet{ + Name: "private-subnet", + EnableDHCP: true, + NetworkID: "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + TenantID: "26a7980765d0414dbc1fc1f88cdb7e6e", + DNSNameservers: []string{}, + AllocationPools: []subnets.AllocationPool{ + { + Start: "10.0.0.2", + End: "10.0.0.254", + }, + }, + HostRoutes: 
[]subnets.HostRoute{}, + IPVersion: 4, + GatewayIP: "10.0.0.1", + CIDR: "10.0.0.0/24", + ID: "08eae331-0402-425a-923c-34f7cfe39c1b", +} + +var Subnet2 = subnets.Subnet{ + Name: "my_subnet", + EnableDHCP: true, + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + TenantID: "4fd44f30292945e481c7b8a0c8908869", + DNSNameservers: []string{}, + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.0.0.2", + End: "192.255.255.254", + }, + }, + HostRoutes: []subnets.HostRoute{}, + IPVersion: 4, + GatewayIP: "192.0.0.1", + CIDR: "192.0.0.0/8", + ID: "54d6f61d-db07-451c-9ab3-b9609b6b6f0b", +} + +var Subnet3 = subnets.Subnet{ + Name: "my_gatewayless_subnet", + EnableDHCP: true, + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", + TenantID: "4fd44f30292945e481c7b8a0c8908869", + DNSNameservers: []string{}, + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }, + HostRoutes: []subnets.HostRoute{}, + IPVersion: 4, + GatewayIP: "", + CIDR: "192.168.1.0/24", + ID: "54d6f61d-db07-451c-9ab3-b9609b6b6f0c", +} + +var Subnet4 = subnets.Subnet{ + Name: "my_subnet_with_subnetpool", + EnableDHCP: false, + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", + TenantID: "4fd44f30292945e481c7b8a0c8908869", + DNSNameservers: []string{}, + AllocationPools: []subnets.AllocationPool{ + { + Start: "10.11.12.2", + End: "10.11.12.254", + }, + }, + HostRoutes: []subnets.HostRoute{}, + IPVersion: 4, + GatewayIP: "", + CIDR: "10.11.12.0/24", + ID: "38186a51-f373-4bbc-838b-6eaa1aa13eac", + SubnetPoolID: "b80340c7-9960-4f67-a99c-02501656284b", +} + +const SubnetGetResult = ` +{ + "subnet": { + "name": "my_subnet", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.0.0.2", + "end": "192.255.255.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.0.0.1", + "cidr": "192.0.0.0/8", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0b", + "subnetpool_id": "b80340c7-9960-4f67-a99c-02501656284b" + } +} +` + +const SubnetCreateRequest = ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "ip_version": 4, + "gateway_ip": "192.168.199.1", + "cidr": "192.168.199.0/24", + "dns_nameservers": ["foo"], + "allocation_pools": [ + { + "start": "192.168.199.2", + "end": "192.168.199.254" + } + ], + "host_routes": [{"destination":"","nexthop": "bar"}], + "subnetpool_id": "b80340c7-9960-4f67-a99c-02501656284b" + } +} +` + +const SubnetCreateResult = ` +{ + "subnet": { + "name": "", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "192.168.199.2", + "end": "192.168.199.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.168.199.1", + "cidr": "192.168.199.0/24", + "id": "3b80198d-4f7b-4f77-9ef5-774d54e17126", + "subnetpool_id": "b80340c7-9960-4f67-a99c-02501656284b" + } +} +` + +const SubnetCreateWithNoGatewayRequest = ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", + "ip_version": 4, + "cidr": "192.168.1.0/24", + "gateway_ip": null, + "allocation_pools": [ + { + "start": "192.168.1.2", + "end": "192.168.1.254" + } + ] + } +} +` + +const SubnetCreateWithNoGatewayResponse = ` +{ + "subnet": { + "name": "", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", + "tenant_id": 
"4fd44f30292945e481c7b8a0c8908869", + "allocation_pools": [ + { + "start": "192.168.1.2", + "end": "192.168.1.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": null, + "cidr": "192.168.1.0/24", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0c" + } +} +` + +const SubnetCreateWithDefaultGatewayRequest = ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", + "ip_version": 4, + "cidr": "192.168.1.0/24", + "allocation_pools": [ + { + "start": "192.168.1.2", + "end": "192.168.1.254" + } + ] + } +} +` + +const SubnetCreateWithDefaultGatewayResponse = ` +{ + "subnet": { + "name": "", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a23", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "allocation_pools": [ + { + "start": "192.168.1.2", + "end": "192.168.1.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "192.168.1.1", + "cidr": "192.168.1.0/24", + "id": "54d6f61d-db07-451c-9ab3-b9609b6b6f0c" + } +} +` +const SubnetCreateWithIPv6RaAddressModeRequest = ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "ip_version": 6, + "gateway_ip": "2001:db8:0:a::1", + "cidr": "2001:db8:0:a:0:0:0:0/64", + "ipv6_address_mode": "slaac", + "ipv6_ra_mode": "slaac" + } +} +` +const SubnetCreateWithIPv6RaAddressModeResponse = ` +{ + "subnet": { + "name": "", + "enable_dhcp": true, + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "tenant_id": "4fd44f30292945e481c7b8a0c8908869", + "dns_nameservers": [], + "host_routes": [], + "ip_version": 6, + "gateway_ip": "2001:db8:0:a::1", + "cidr": "2001:db8:0:a:0:0:0:0/64", + "id": "3b80198d-4f7b-4f77-9ef5-774d54e17126", + "ipv6_address_mode": "slaac", + "ipv6_ra_mode": "slaac" + } +} +` + +const SubnetCreateRequestWithNoCIDR = ` +{ + "subnet": { + "network_id": "d32019d3-bc6e-4319-9c1d-6722fc136a22", + "ip_version": 4, + "dns_nameservers": ["foo"], + "host_routes": [{"destination":"","nexthop": "bar"}], + "subnetpool_id": "b80340c7-9960-4f67-a99c-02501656284b" + } +} +` + +const SubnetUpdateRequest = ` +{ + "subnet": { + "name": "my_new_subnet", + "dns_nameservers": ["foo"], + "host_routes": [{"destination":"","nexthop": "bar"}] + } +} +` + +const SubnetUpdateResponse = ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} +` + +const SubnetUpdateGatewayRequest = ` +{ + "subnet": { + "name": "my_new_subnet", + "gateway_ip": "10.0.0.1" + } +} +` + +const SubnetUpdateGatewayResponse = ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} +` + +const SubnetUpdateRemoveGatewayRequest = ` +{ + "subnet": { + "name": "my_new_subnet", + "gateway_ip": null + } +} +` + +const SubnetUpdateRemoveGatewayResponse = ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": 
"db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": null, + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} +` + +const SubnetUpdateHostRoutesRequest = ` +{ + "subnet": { + "name": "my_new_subnet", + "host_routes": [ + { + "destination": "192.168.1.1/24", + "nexthop": "bar" + } + ] + } +} +` + +const SubnetUpdateHostRoutesResponse = ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "host_routes": [ + { + "destination": "192.168.1.1/24", + "nexthop": "bar" + } + ], + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} +` + +const SubnetUpdateRemoveHostRoutesRequest = ` +{ + "subnet": { + "host_routes": [] + } +} +` + +const SubnetUpdateRemoveHostRoutesResponse = ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.0.0.2", + "end": "10.0.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": null, + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} +` + +const SubnetUpdateAllocationPoolRequest = ` +{ + "subnet": { + "name": "my_new_subnet", + "allocation_pools": [ + { + "start": "10.1.0.2", + "end": "10.1.0.254" + } + ] + } +} +` + +const SubnetUpdateAllocationPoolResponse = ` +{ + "subnet": { + "name": "my_new_subnet", + "enable_dhcp": true, + "network_id": "db193ab3-96e3-4cb3-8fc5-05f4296d0324", + "tenant_id": "26a7980765d0414dbc1fc1f88cdb7e6e", + "dns_nameservers": [], + "allocation_pools": [ + { + "start": "10.1.0.2", + "end": "10.1.0.254" + } + ], + "host_routes": [], + "ip_version": 4, + "gateway_ip": "10.0.0.1", + "cidr": "10.0.0.0/24", + "id": "08eae331-0402-425a-923c-34f7cfe39c1b" + } +} +` diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/requests_test.go new file mode 100644 index 000000000000..bef6794b264b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/requests_test.go @@ -0,0 +1,561 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + fake "github.com/gophercloud/gophercloud/openstack/networking/v2/common" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SubnetListResult) + }) + + count := 0 + + subnets.List(fake.ServiceClient(), subnets.ListOpts{}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := 
subnets.ExtractSubnets(page) + if err != nil { + t.Errorf("Failed to extract subnets: %v", err) + return false, nil + } + + expected := []subnets.Subnet{ + Subnet1, + Subnet2, + Subnet3, + Subnet4, + } + + th.CheckDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/54d6f61d-db07-451c-9ab3-b9609b6b6f0b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, SubnetGetResult) + }) + + s, err := subnets.Get(fake.ServiceClient(), "54d6f61d-db07-451c-9ab3-b9609b6b6f0b").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_subnet") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []subnets.AllocationPool{ + { + Start: "192.0.0.2", + End: "192.255.255.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []subnets.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.0.0.1") + th.AssertEquals(t, s.CIDR, "192.0.0.0/8") + th.AssertEquals(t, s.ID, "54d6f61d-db07-451c-9ab3-b9609b6b6f0b") + th.AssertEquals(t, s.SubnetPoolID, "b80340c7-9960-4f67-a99c-02501656284b") +} + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetCreateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetCreateResult) + }) + + var gatewayIP = "192.168.199.1" + opts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + IPVersion: 4, + CIDR: "192.168.199.0/24", + GatewayIP: &gatewayIP, + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }, + DNSNameservers: []string{"foo"}, + HostRoutes: []subnets.HostRoute{ + {NextHop: "bar"}, + }, + SubnetPoolID: "b80340c7-9960-4f67-a99c-02501656284b", + } + s, err := subnets.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []subnets.AllocationPool{ + { + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []subnets.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.168.199.1") + th.AssertEquals(t, s.CIDR, "192.168.199.0/24") + th.AssertEquals(t, s.ID, "3b80198d-4f7b-4f77-9ef5-774d54e17126") + th.AssertEquals(t, s.SubnetPoolID, "b80340c7-9960-4f67-a99c-02501656284b") +} + +func TestCreateNoGateway(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + 
th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetCreateWithNoGatewayRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetCreateWithNoGatewayResponse) + }) + + var noGateway = "" + opts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", + IPVersion: 4, + CIDR: "192.168.1.0/24", + GatewayIP: &noGateway, + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }, + DNSNameservers: []string{}, + } + s, err := subnets.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a23") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.AllocationPools, []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []subnets.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "") + th.AssertEquals(t, s.CIDR, "192.168.1.0/24") + th.AssertEquals(t, s.ID, "54d6f61d-db07-451c-9ab3-b9609b6b6f0c") +} + +func TestCreateDefaultGateway(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetCreateWithDefaultGatewayRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetCreateWithDefaultGatewayResponse) + }) + + opts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a23", + IPVersion: 4, + CIDR: "192.168.1.0/24", + AllocationPools: []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }, + DNSNameservers: []string{}, + } + s, err := subnets.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a23") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.AllocationPools, []subnets.AllocationPool{ + { + Start: "192.168.1.2", + End: "192.168.1.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []subnets.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.168.1.1") + th.AssertEquals(t, s.CIDR, "192.168.1.0/24") + th.AssertEquals(t, s.ID, "54d6f61d-db07-451c-9ab3-b9609b6b6f0c") +} + +func TestCreateIPv6RaAddressMode(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetCreateWithIPv6RaAddressModeRequest) + + w.Header().Add("Content-Type", 
"application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetCreateWithIPv6RaAddressModeResponse) + }) + + var gatewayIP = "2001:db8:0:a::1" + opts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + IPVersion: 6, + CIDR: "2001:db8:0:a:0:0:0:0/64", + GatewayIP: &gatewayIP, + IPv6AddressMode: "slaac", + IPv6RAMode: "slaac", + } + s, err := subnets.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertEquals(t, s.IPVersion, 6) + th.AssertEquals(t, s.GatewayIP, "2001:db8:0:a::1") + th.AssertEquals(t, s.CIDR, "2001:db8:0:a:0:0:0:0/64") + th.AssertEquals(t, s.ID, "3b80198d-4f7b-4f77-9ef5-774d54e17126") + th.AssertEquals(t, s.IPv6AddressMode, "slaac") + th.AssertEquals(t, s.IPv6RAMode, "slaac") +} + +func TestCreateWithNoCIDR(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetCreateRequestWithNoCIDR) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetCreateResult) + }) + + opts := subnets.CreateOpts{ + NetworkID: "d32019d3-bc6e-4319-9c1d-6722fc136a22", + IPVersion: 4, + DNSNameservers: []string{"foo"}, + HostRoutes: []subnets.HostRoute{ + {NextHop: "bar"}, + }, + SubnetPoolID: "b80340c7-9960-4f67-a99c-02501656284b", + } + s, err := subnets.Create(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "") + th.AssertEquals(t, s.EnableDHCP, true) + th.AssertEquals(t, s.NetworkID, "d32019d3-bc6e-4319-9c1d-6722fc136a22") + th.AssertEquals(t, s.TenantID, "4fd44f30292945e481c7b8a0c8908869") + th.AssertDeepEquals(t, s.DNSNameservers, []string{}) + th.AssertDeepEquals(t, s.AllocationPools, []subnets.AllocationPool{ + { + Start: "192.168.199.2", + End: "192.168.199.254", + }, + }) + th.AssertDeepEquals(t, s.HostRoutes, []subnets.HostRoute{}) + th.AssertEquals(t, s.IPVersion, 4) + th.AssertEquals(t, s.GatewayIP, "192.168.199.1") + th.AssertEquals(t, s.CIDR, "192.168.199.0/24") + th.AssertEquals(t, s.ID, "3b80198d-4f7b-4f77-9ef5-774d54e17126") + th.AssertEquals(t, s.SubnetPoolID, "b80340c7-9960-4f67-a99c-02501656284b") +} + +func TestRequiredCreateOpts(t *testing.T) { + res := subnets.Create(fake.ServiceClient(), subnets.CreateOpts{}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = subnets.Create(fake.ServiceClient(), subnets.CreateOpts{NetworkID: "foo"}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } + + res = subnets.Create(fake.ServiceClient(), subnets.CreateOpts{NetworkID: "foo", CIDR: "bar", IPVersion: 40}) + if res.Err == nil { + t.Fatalf("Expected error, got none") + } +} + +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", 
"application/json") + th.TestJSONRequest(t, r, SubnetUpdateRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetUpdateResponse) + }) + + opts := subnets.UpdateOpts{ + Name: "my_new_subnet", + DNSNameservers: []string{"foo"}, + HostRoutes: &[]subnets.HostRoute{ + {NextHop: "bar"}, + }, + } + s, err := subnets.Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") +} + +func TestUpdateGateway(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetUpdateGatewayRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetUpdateGatewayResponse) + }) + + var gatewayIP = "10.0.0.1" + opts := subnets.UpdateOpts{ + Name: "my_new_subnet", + GatewayIP: &gatewayIP, + } + s, err := subnets.Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertEquals(t, s.GatewayIP, "10.0.0.1") +} + +func TestUpdateRemoveGateway(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetUpdateRemoveGatewayRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetUpdateRemoveGatewayResponse) + }) + + var noGateway = "" + opts := subnets.UpdateOpts{ + Name: "my_new_subnet", + GatewayIP: &noGateway, + } + s, err := subnets.Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertEquals(t, s.GatewayIP, "") +} + +func TestUpdateHostRoutes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetUpdateHostRoutesRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetUpdateHostRoutesResponse) + }) + + HostRoutes := []subnets.HostRoute{ + { + DestinationCIDR: "192.168.1.1/24", + NextHop: "bar", + }, + } + + opts := subnets.UpdateOpts{ + Name: "my_new_subnet", + HostRoutes: &HostRoutes, + } + s, err := subnets.Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, 
err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertDeepEquals(t, s.HostRoutes, HostRoutes) +} + +func TestUpdateRemoveHostRoutes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetUpdateRemoveHostRoutesRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetUpdateRemoveHostRoutesResponse) + }) + + noHostRoutes := []subnets.HostRoute{} + opts := subnets.UpdateOpts{ + HostRoutes: &noHostRoutes, + } + s, err := subnets.Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertDeepEquals(t, s.HostRoutes, noHostRoutes) +} + +func TestUpdateAllocationPool(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, SubnetUpdateAllocationPoolRequest) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusCreated) + + fmt.Fprintf(w, SubnetUpdateAllocationPoolResponse) + }) + + opts := subnets.UpdateOpts{ + Name: "my_new_subnet", + AllocationPools: []subnets.AllocationPool{ + { + Start: "10.1.0.2", + End: "10.1.0.254", + }, + }, + } + s, err := subnets.Update(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b", opts).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "my_new_subnet") + th.AssertEquals(t, s.ID, "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertDeepEquals(t, s.AllocationPools, []subnets.AllocationPool{ + { + Start: "10.1.0.2", + End: "10.1.0.254", + }, + }) +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/v2.0/subnets/08eae331-0402-425a-923c-34f7cfe39c1b", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusNoContent) + }) + + res := subnets.Delete(fake.ServiceClient(), "08eae331-0402-425a-923c-34f7cfe39c1b") + th.AssertNoErr(t, res.Err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/results_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/results_test.go new file mode 100644 index 000000000000..a227ccde9993 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/testing/results_test.go @@ -0,0 +1,59 @@ +package testing + +import ( + "encoding/json" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/networking/v2/subnets" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestHostRoute(t *testing.T) { + sejson := []byte(` + {"subnet": { + "name": "test-subnet", + "enable_dhcp": 
false, + "network_id": "3e66c41e-cbbd-4019-9aab-740b7e4150a0", + "tenant_id": "f86e123198cf42d19c8854c5f80c2f06", + "dns_nameservers": [], + "gateway_ip": "172.16.0.1", + "ipv6_ra_mode": null, + "allocation_pools": [ + { + "start": "172.16.0.2", + "end": "172.16.255.254" + } + ], + "host_routes": [ + { + "destination": "172.20.1.0/24", + "nexthop": "172.16.0.2" + } + ], + "ip_version": 4, + "ipv6_address_mode": null, + "cidr": "172.16.0.0/16", + "id": "6dcaa873-7115-41af-9ef5-915f73636e43", + "subnetpool_id": null + }} +`) + + var dejson interface{} + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatalf("%s", err) + } + + resp := gophercloud.Result{Body: dejson} + var subnetWrapper struct { + Subnet subnets.Subnet `json:"subnet"` + } + err = resp.ExtractInto(&subnetWrapper) + if err != nil { + t.Fatalf("%s", err) + } + route := subnetWrapper.Subnet.HostRoutes[0] + th.AssertEquals(t, route.NextHop, "172.16.0.2") + th.AssertEquals(t, route.DestinationCIDR, "172.20.1.0/24") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go new file mode 100644 index 000000000000..7a4f2f7dd4c6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/networking/v2/subnets/urls.go @@ -0,0 +1,31 @@ +package subnets + +import "github.com/gophercloud/gophercloud" + +func resourceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("subnets", id) +} + +func rootURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("subnets") +} + +func listURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func createURL(c *gophercloud.ServiceClient) string { + return rootURL(c) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return resourceURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go new file mode 100644 index 000000000000..0fa1c083a262 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/doc.go @@ -0,0 +1,29 @@ +/* +Package accounts contains functionality for working with Object Storage +account resources. An account is the top-level resource the object storage +hierarchy: containers belong to accounts, objects belong to containers. + +Another way of thinking of an account is like a namespace for all your +resources. It is synonymous with a project or tenant in other OpenStack +services. 
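An extra illustrative example, not part of the upstream file, in the same style as the examples that follow (objectStorageClient is assumed to be an authenticated Object Storage client): custom account metadata can be read directly with ExtractMetadata instead of Extract.

Example to Read an Account's Custom Metadata

	metadata, err := accounts.Get(objectStorageClient, nil).ExtractMetadata()
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", metadata)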
+ +Example to Get an Account + + account, err := accounts.Get(objectStorageClient, nil).Extract() + fmt.Printf("%+v\n", account) + +Example to Update an Account + + metadata := map[string]string{ + "some": "metadata", + } + + updateOpts := accounts.UpdateOpts{ + Metadata: metadata, + } + + updateResult, err := accounts.Update(objectStorageClient, updateOpts).Extract() + fmt.Printf("%+v\n", updateResult) + +*/ +package accounts diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go new file mode 100644 index 000000000000..452a331c74ca --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/requests.go @@ -0,0 +1,100 @@ +package accounts + +import "github.com/gophercloud/gophercloud" + +// GetOptsBuilder allows extensions to add additional headers to the Get +// request. +type GetOptsBuilder interface { + ToAccountGetMap() (map[string]string, error) +} + +// GetOpts is a structure that contains parameters for getting an account's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToAccountGetMap formats a GetOpts into a map[string]string of headers. +func (opts GetOpts) ToAccountGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves an account's metadata. To extract just the +// custom metadata, call the ExtractMetadata method on the GetResult. To extract +// all the headers that are returned (including the metadata), call the +// Extract method on the GetResult. +func Get(c *gophercloud.ServiceClient, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountGetMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Head(getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional headers to the Update +// request. +type UpdateOptsBuilder interface { + ToAccountUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that contains parameters for updating, creating, or +// deleting an account's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + TempURLKey string `h:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `h:"X-Account-Meta-Temp-URL-Key-2"` +} + +// ToAccountUpdateMap formats an UpdateOpts into a map[string]string of headers. +func (opts UpdateOpts) ToAccountUpdateMap() (map[string]string, error) { + headers, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + headers["X-Account-Meta-"+k] = v + } + return headers, err +} + +// Update is a function that creates, updates, or deletes an account's metadata. +// To extract the headers returned, call the Extract method on the UpdateResult. 
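Not part of the vendored files: a sketch of one common use of Update, installing a temporary-URL signing key through the X-Account-Meta-Temp-URL-Key header that UpdateOpts maps. client is assumed to be an authenticated Object Storage ServiceClient and the key value is illustrative.

package main

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts"
)

// setTempURLKey stores the secret later used to sign temporary object URLs.
// The TempURLKey field is sent as the X-Account-Meta-Temp-URL-Key header.
func setTempURLKey(client *gophercloud.ServiceClient, key string) error {
	opts := accounts.UpdateOpts{TempURLKey: key}
	_, err := accounts.Update(client, opts).Extract()
	return err
}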
+func Update(c *gophercloud.ServiceClient, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToAccountUpdateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go new file mode 100644 index 000000000000..10661e671571 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/results.go @@ -0,0 +1,180 @@ +package accounts + +import ( + "encoding/json" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" +) + +// UpdateResult is returned from a call to the Update function. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// UpdateHeader represents the headers returned in the response from an Update +// request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + TransID string `json:"X-Trans-Id"` + Date time.Time `json:"-"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// Extract will return a struct of headers returned from a call to Get. To +// obtain a map of headers, call the Extract method on the GetResult. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// GetHeader represents the headers returned in the response from a Get request. 
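Not part of the vendored files: a sketch of how the GetHeader fields documented above are typically consumed, assuming client is an authenticated Object Storage ServiceClient. QuotaBytes is a *int64 because the X-Account-Meta-Quota-Bytes header may be absent, in which case it stays nil.

package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts"
)

// printAccountUsage reports the account's storage usage and, when present,
// its byte quota, using the headers parsed from a HEAD request.
func printAccountUsage(client *gophercloud.ServiceClient) error {
	hdr, err := accounts.Get(client, nil).Extract()
	if err != nil {
		return err
	}
	fmt.Printf("bytes used: %d, objects: %d, containers: %d\n",
		hdr.BytesUsed, hdr.ObjectCount, hdr.ContainerCount)
	if hdr.QuotaBytes != nil {
		fmt.Printf("quota: %d bytes\n", *hdr.QuotaBytes)
	}
	return nil
}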
+type GetHeader struct { + BytesUsed int64 `json:"-"` + QuotaBytes *int64 `json:"-"` + ContainerCount int64 `json:"-"` + ContentLength int64 `json:"-"` + ObjectCount int64 `json:"-"` + ContentType string `json:"Content-Type"` + TransID string `json:"X-Trans-Id"` + TempURLKey string `json:"X-Account-Meta-Temp-URL-Key"` + TempURLKey2 string `json:"X-Account-Meta-Temp-URL-Key-2"` + Date time.Time `json:"-"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Account-Bytes-Used"` + QuotaBytes string `json:"X-Account-Meta-Quota-Bytes"` + ContentLength string `json:"Content-Length"` + ContainerCount string `json:"X-Account-Container-Count"` + ObjectCount string `json:"X-Account-Object-Count"` + Date string `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.QuotaBytes { + case "": + r.QuotaBytes = nil + default: + v, err := strconv.ParseInt(s.QuotaBytes, 10, 64) + if err != nil { + return err + } + r.QuotaBytes = &v + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + switch s.ContainerCount { + case "": + r.ContainerCount = 0 + default: + r.ContainerCount, err = strconv.ParseInt(s.ContainerCount, 10, 64) + if err != nil { + return err + } + } + + if s.Date != "" { + r.Date, err = time.Parse(time.RFC1123, s.Date) + } + + return err +} + +// GetResult is returned from a call to the Get function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metatdata associated with the account. 
+func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Account-Meta-") { + key := strings.TrimPrefix(k, "X-Account-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/doc.go new file mode 100644 index 000000000000..d6ad0afdd0e8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/doc.go @@ -0,0 +1,2 @@ +// accounts unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/fixtures.go new file mode 100644 index 000000000000..e1d2182699a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/fixtures.go @@ -0,0 +1,57 @@ +package testing + +import ( + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleGetAccountSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with a `Get` response. +func HandleGetAccountSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("X-Account-Container-Count", "2") + w.Header().Set("X-Account-Object-Count", "5") + w.Header().Set("X-Account-Meta-Quota-Bytes", "42") + w.Header().Set("X-Account-Bytes-Used", "14") + w.Header().Set("X-Account-Meta-Subject", "books") + w.Header().Set("Date", "Fri, 17 Jan 2014 16:09:56 GMT") + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleGetAccountNoQuotaSuccessfully creates an HTTP handler at `/` on the +// test handler mux that responds with a `Get` response. +func HandleGetAccountNoQuotaSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Set("X-Account-Container-Count", "2") + w.Header().Set("X-Account-Object-Count", "5") + w.Header().Set("X-Account-Bytes-Used", "14") + w.Header().Set("X-Account-Meta-Subject", "books") + w.Header().Set("Date", "Fri, 17 Jan 2014 16:09:56 GMT") + + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateAccountSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with a `Update` response. 
+func HandleUpdateAccountSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "X-Account-Meta-Gophercloud-Test", "accounts") + + w.Header().Set("Date", "Fri, 17 Jan 2014 16:09:56 GMT") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/requests_test.go new file mode 100644 index 000000000000..c396227e4d37 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/testing/requests_test.go @@ -0,0 +1,82 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +var ( + loc, _ = time.LoadLocation("GMT") +) + +func TestUpdateAccount(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateAccountSuccessfully(t) + + options := &accounts.UpdateOpts{Metadata: map[string]string{"gophercloud-test": "accounts"}} + res := accounts.Update(fake.ServiceClient(), options) + th.AssertNoErr(t, res.Err) + + expected := &accounts.UpdateHeader{ + Date: time.Date(2014, time.January, 17, 16, 9, 56, 0, loc), // Fri, 17 Jan 2014 16:09:56 GMT + } + actual, err := res.Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestGetAccount(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetAccountSuccessfully(t) + + expectedMetadata := map[string]string{"Subject": "books", "Quota-Bytes": "42"} + res := accounts.Get(fake.ServiceClient(), &accounts.GetOpts{}) + th.AssertNoErr(t, res.Err) + actualMetadata, _ := res.ExtractMetadata() + th.CheckDeepEquals(t, expectedMetadata, actualMetadata) + _, err := res.Extract() + th.AssertNoErr(t, err) + + var quotaBytes int64 = 42 + expected := &accounts.GetHeader{ + QuotaBytes: &quotaBytes, + ContainerCount: 2, + ObjectCount: 5, + BytesUsed: 14, + Date: time.Date(2014, time.January, 17, 16, 9, 56, 0, loc), // Fri, 17 Jan 2014 16:09:56 GMT + } + actual, err := res.Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestGetAccountNoQuota(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetAccountNoQuotaSuccessfully(t) + + expectedMetadata := map[string]string{"Subject": "books"} + res := accounts.Get(fake.ServiceClient(), &accounts.GetOpts{}) + th.AssertNoErr(t, res.Err) + actualMetadata, _ := res.ExtractMetadata() + th.CheckDeepEquals(t, expectedMetadata, actualMetadata) + _, err := res.Extract() + th.AssertNoErr(t, err) + + expected := &accounts.GetHeader{ + QuotaBytes: nil, + ContainerCount: 2, + ObjectCount: 5, + BytesUsed: 14, + Date: time.Date(2014, time.January, 17, 16, 9, 56, 0, loc), // Fri, 17 Jan 2014 16:09:56 GMT + } + actual, err := res.Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go new file mode 100644 index 000000000000..71540b1daf3d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts/urls.go @@ -0,0 +1,11 @@ +package 
accounts + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func updateURL(c *gophercloud.ServiceClient) string { + return getURL(c) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go new file mode 100644 index 000000000000..9e5f66419804 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/doc.go @@ -0,0 +1,92 @@ +/* +Package containers contains functionality for working with Object Storage +container resources. A container serves as a logical namespace for objects +that are placed inside it - an object with the same name in two different +containers represents two different objects. + +In addition to containing objects, you can also use the container to control +access to objects by using an access control list (ACL). + +Note: When referencing the Object Storage API docs, some of the API actions +are listed under "accounts" rather than "containers". This was an intentional +design in Gophercloud to make some container actions feel more natural. + +Example to List Containers + + listOpts := containers.ListOpts{ + Full: true, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractInfo(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to List Only Container Names + + listOpts := containers.ListOpts{ + Full: false, + } + + allPages, err := containers.List(objectStorageClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allContainers, err := containers.ExtractNames(allPages) + if err != nil { + panic(err) + } + + for _, container := range allContainers { + fmt.Printf("%+v\n", container) + } + +Example to Create a Container + + createOpts := containers.CreateOpts{ + ContentType: "application/json", + Metadata: map[string]string{ + "foo": "bar", + }, + } + + container, err := containers.Create(objectStorageClient, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Update a Container + + containerName := "my_container" + + updateOpts := containers.UpdateOpts{ + Metadata: map[string]string{ + "bar": "baz", + }, + } + + container, err := containers.Update(objectStorageClient, containerName, updateOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete a Container + + containerName := "my_container" + + container, err := containers.Delete(objectStorageClient, containerName).Extract() + if err != nil { + panic(err) + } +*/ +package containers diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go new file mode 100644 index 000000000000..4799204e7070 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/requests.go @@ -0,0 +1,221 @@ +package containers + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToContainerListParams() (bool, string, error) +} + +// ListOpts is a structure that holds options for listing containers. 
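Not part of the vendored files: a sketch of paging through containers with EachPage rather than AllPages, which keeps memory bounded on accounts with many containers. client is assumed to be an authenticated Object Storage ServiceClient; the prefix and page size are illustrative.

package main

import (
	"fmt"

	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
	"github.com/gophercloud/gophercloud/pagination"
)

// listByPrefix walks the matching containers one page at a time and prints
// the name, object count and byte count of each.
func listByPrefix(client *gophercloud.ServiceClient, prefix string) error {
	opts := containers.ListOpts{
		Full:   true,
		Prefix: prefix,
		Limit:  100,
	}
	return containers.List(client, opts).EachPage(func(page pagination.Page) (bool, error) {
		infos, err := containers.ExtractInfo(page)
		if err != nil {
			return false, err
		}
		for _, c := range infos {
			fmt.Printf("%s: %d objects, %d bytes\n", c.Name, c.Count, c.Bytes)
		}
		return true, nil
	})
}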
+type ListOpts struct { + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` +} + +// ToContainerListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each container. +func (opts ListOpts) ToContainerListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves containers associated with the account as +// well as account metadata. It returns a pager which can be iterated with the +// EachPage function. +func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c) + if opts != nil { + full, query, err := opts.ToContainerListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ContainerPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToContainerCreateMap() (map[string]string, error) +} + +// CreateOpts is a structure that holds parameters for creating a container. +type CreateOpts struct { + Metadata map[string]string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + IfNoneMatch string `h:"If-None-Match"` + VersionsLocation string `h:"X-Versions-Location"` +} + +// ToContainerCreateMap formats a CreateOpts into a map of headers. +func (opts CreateOpts) ToContainerCreateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Create is a function that creates a new container. +func Create(c *gophercloud.ServiceClient, containerName string, opts CreateOptsBuilder) (r CreateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerCreateMap() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("PUT", createURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + resp.Body.Close() + } + r.Err = err + return +} + +// Delete is a function that deletes a container. +func Delete(c *gophercloud.ServiceClient, containerName string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, containerName), nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToContainerUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting a container's metadata. 
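Not part of the vendored files: a sketch of one common Update use, granting anonymous read access by setting the read ACL that the ContainerRead field maps onto the X-Container-Read header. client is assumed to be an authenticated Object Storage ServiceClient, and ".r:*" is the conventional Swift ACL element for public read access (whether it is honoured depends on the deployment).

package main

import (
	"github.com/gophercloud/gophercloud"
	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
)

// makePublicRead sets a read ACL on the named container. The ContainerRead
// field is sent as the X-Container-Read header.
func makePublicRead(client *gophercloud.ServiceClient, name string) error {
	opts := containers.UpdateOpts{ContainerRead: ".r:*"}
	_, err := containers.Update(client, name, opts).Extract()
	return err
}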
+type UpdateOpts struct { + Metadata map[string]string + ContainerRead string `h:"X-Container-Read"` + ContainerSyncTo string `h:"X-Container-Sync-To"` + ContainerSyncKey string `h:"X-Container-Sync-Key"` + ContainerWrite string `h:"X-Container-Write"` + ContentType string `h:"Content-Type"` + DetectContentType bool `h:"X-Detect-Content-Type"` + RemoveVersionsLocation string `h:"X-Remove-Versions-Location"` + VersionsLocation string `h:"X-Versions-Location"` +} + +// ToContainerUpdateMap formats a UpdateOpts into a map of headers. +func (opts UpdateOpts) ToContainerUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Container-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes a container's +// metadata. +func Update(c *gophercloud.ServiceClient, containerName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Request("POST", updateURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201, 202, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the Get +// request. +type GetOptsBuilder interface { + ToContainerGetMap() (map[string]string, error) +} + +// GetOpts is a structure that holds options for listing containers. +type GetOpts struct { + Newest bool `h:"X-Newest"` +} + +// ToContainerGetMap formats a GetOpts into a map of headers. +func (opts GetOpts) ToContainerGetMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Get is a function that retrieves the metadata of a container. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName string, opts GetOptsBuilder) (r GetResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToContainerGetMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + resp, err := c.Head(getURL(c, containerName), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go new file mode 100644 index 000000000000..e8c78880a4b4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/results.go @@ -0,0 +1,343 @@ +package containers + +import ( + "encoding/json" + "fmt" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Container represents a container resource. +type Container struct { + // The total number of bytes stored in the container. + Bytes int64 `json:"bytes"` + + // The total number of objects stored in the container. + Count int64 `json:"count"` + + // The name of the container. + Name string `json:"name"` +} + +// ContainerPage is the page returned by a pager when traversing over a +// collection of containers. 
+type ContainerPage struct { + pagination.MarkerPageBase +} + +//IsEmpty returns true if a ListResult contains no container names. +func (r ContainerPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last container name in a ListResult. +func (r ContainerPage) LastMarker() (string, error) { + names, err := ExtractNames(r) + if err != nil { + return "", err + } + if len(names) == 0 { + return "", nil + } + return names[len(names)-1], nil +} + +// ExtractInfo is a function that takes a ListResult and returns the +// containers' information. +func ExtractInfo(r pagination.Page) ([]Container, error) { + var s []Container + err := (r.(ContainerPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a ListResult and returns the +// containers' names. +func ExtractNames(page pagination.Page) ([]string, error) { + casted := page.(ContainerPage) + ct := casted.Header.Get("Content-Type") + + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(page) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, container := range parsed { + names = append(names, container.Name) + } + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(page.(ContainerPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// GetHeader represents the headers returned in the response from a Get request. +type GetHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + BytesUsed int64 `json:"-"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ObjectCount int64 `json:"-"` + Read []string `json:"-"` + TransID string `json:"X-Trans-Id"` + VersionsLocation string `json:"X-Versions-Location"` + Write []string `json:"-"` + StoragePolicy string `json:"X-Storage-Policy"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + BytesUsed string `json:"X-Container-Bytes-Used"` + ContentLength string `json:"Content-Length"` + ObjectCount string `json:"X-Container-Object-Count"` + Write string `json:"X-Container-Write"` + Read string `json:"X-Container-Read"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.BytesUsed { + case "": + r.BytesUsed = 0 + default: + r.BytesUsed, err = strconv.ParseInt(s.BytesUsed, 10, 64) + if err != nil { + return err + } + } + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch s.ObjectCount { + case "": + r.ObjectCount = 0 + default: + r.ObjectCount, err = strconv.ParseInt(s.ObjectCount, 10, 64) + if err != nil { + return err + } + } + + r.Read = strings.Split(s.Read, ",") + r.Write = strings.Split(s.Write, ",") + + r.Date = time.Time(s.Date) + + return err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. 
+func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the container. +func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Container-Meta-") { + key := strings.TrimPrefix(k, "X-Container-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a Create +// request. +type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// CreateResult represents the result of a create operation. To extract the +// the headers from the HTTP response, call its Extract method. +type CreateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. +// To extract the headers from the HTTP response, call its Extract method. +func (r CreateResult) Extract() (*CreateHeader, error) { + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a Update +// request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return err +} + +// UpdateResult represents the result of an update operation. To extract the +// the headers from the HTTP response, call its Extract method. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a Delete +// request. 
+type DeleteHeader struct {
+	ContentLength int64     `json:"-"`
+	ContentType   string    `json:"Content-Type"`
+	Date          time.Time `json:"-"`
+	TransID       string    `json:"X-Trans-Id"`
+}
+
+func (r *DeleteHeader) UnmarshalJSON(b []byte) error {
+	type tmp DeleteHeader
+	var s struct {
+		tmp
+		ContentLength string                  `json:"Content-Length"`
+		Date          gophercloud.JSONRFC1123 `json:"Date"`
+	}
+	err := json.Unmarshal(b, &s)
+	if err != nil {
+		return err
+	}
+
+	*r = DeleteHeader(s.tmp)
+
+	switch s.ContentLength {
+	case "":
+		r.ContentLength = 0
+	default:
+		r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	r.Date = time.Time(s.Date)
+
+	return err
+}
+
+// DeleteResult represents the result of a delete operation. To extract the
+// headers from the HTTP response, call its Extract method.
+type DeleteResult struct {
+	gophercloud.HeaderResult
+}
+
+// Extract will return a struct of headers returned from a call to Delete.
+func (r DeleteResult) Extract() (*DeleteHeader, error) {
+	var s *DeleteHeader
+	err := r.ExtractInto(&s)
+	return s, err
+}
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/doc.go
new file mode 100644
index 000000000000..a39f42b41f78
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/doc.go
@@ -0,0 +1,2 @@
+// containers unit tests
+package testing
diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/fixtures.go
new file mode 100644
index 000000000000..5915323ad482
--- /dev/null
+++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/fixtures.go
@@ -0,0 +1,155 @@
+package testing
+
+import (
+	"fmt"
+	"net/http"
+	"testing"
+
+	"github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers"
+	th "github.com/gophercloud/gophercloud/testhelper"
+	fake "github.com/gophercloud/gophercloud/testhelper/client"
+)
+
+// ExpectedListInfo is the result expected from a call to `List` when full
+// info is requested.
+var ExpectedListInfo = []containers.Container{
+	{
+		Count: 0,
+		Bytes: 0,
+		Name:  "janeausten",
+	},
+	{
+		Count: 1,
+		Bytes: 14,
+		Name:  "marktwain",
+	},
+}
+
+// ExpectedListNames is the result expected from a call to `List` when just
+// container names are requested.
+var ExpectedListNames = []string{"janeausten", "marktwain"}
+
+// HandleListContainerInfoSuccessfully creates an HTTP handler at `/` on the test handler mux that
+// responds with a `List` response when full info is requested.
+func HandleListContainerInfoSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, `[ + { + "count": 0, + "bytes": 0, + "name": "janeausten" + }, + { + "count": 1, + "bytes": 14, + "name": "marktwain" + } + ]`) + case "janeausten": + fmt.Fprintf(w, `[ + { + "count": 1, + "bytes": 14, + "name": "marktwain" + } + ]`) + case "marktwain": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleListContainerNamesSuccessfully creates an HTTP handler at `/` on the test handler mux that +// responds with a `ListNames` response when only container names are requested. +func HandleListContainerNamesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "text/plain") + + w.Header().Set("Content-Type", "text/plain") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, "janeausten\nmarktwain\n") + case "janeausten": + fmt.Fprintf(w, "marktwain\n") + case "marktwain": + fmt.Fprintf(w, ``) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleCreateContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Create` response. +func HandleCreateContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Add("X-Container-Meta-Foo", "bar") + w.Header().Set("Content-Length", "0") + w.Header().Set("Content-Type", "text/html; charset=UTF-8") + w.Header().Set("Date", "Wed, 17 Aug 2016 19:25:43 GMT") + w.Header().Set("X-Trans-Id", "tx554ed59667a64c61866f1-0058b4ba37") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleDeleteContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Delete` response. +func HandleDeleteContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Update` response. +func HandleUpdateContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleGetContainerSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `Get` response. 
+func HandleGetContainerSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.Header().Set("Accept-Ranges", "bytes") + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("Date", "Wed, 17 Aug 2016 19:25:43 GMT") + w.Header().Set("X-Container-Bytes-Used", "100") + w.Header().Set("X-Container-Object-Count", "4") + w.Header().Set("X-Container-Read", "test") + w.Header().Set("X-Container-Write", "test2,user4") + w.Header().Set("X-Timestamp", "1471298837.95721") + w.Header().Set("X-Trans-Id", "tx554ed59667a64c61866f1-0057b4ba37") + w.Header().Set("X-Storage-Policy", "test_policy") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/requests_test.go new file mode 100644 index 000000000000..c5eabd29f3a2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/testing/requests_test.go @@ -0,0 +1,148 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +var ( + metadata = map[string]string{"gophercloud-test": "containers"} + loc, _ = time.LoadLocation("GMT") +) + +func TestListContainerInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerInfoSuccessfully(t) + + count := 0 + err := containers.List(fake.ServiceClient(), &containers.ListOpts{Full: true}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := containers.ExtractInfo(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedListInfo, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAllContainerInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerInfoSuccessfully(t) + + allPages, err := containers.List(fake.ServiceClient(), &containers.ListOpts{Full: true}).AllPages() + th.AssertNoErr(t, err) + actual, err := containers.ExtractInfo(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedListInfo, actual) +} + +func TestListContainerNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerNamesSuccessfully(t) + + count := 0 + err := containers.List(fake.ServiceClient(), &containers.ListOpts{Full: false}).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := containers.ExtractNames(page) + if err != nil { + t.Errorf("Failed to extract container names: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListAllContainerNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListContainerNamesSuccessfully(t) + + allPages, err := containers.List(fake.ServiceClient(), &containers.ListOpts{Full: false}).AllPages() + th.AssertNoErr(t, err) + actual, err := containers.ExtractNames(allPages) + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, ExpectedListNames, actual) +} + +func 
TestCreateContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateContainerSuccessfully(t) + + options := containers.CreateOpts{ContentType: "application/json", Metadata: map[string]string{"foo": "bar"}} + res := containers.Create(fake.ServiceClient(), "testContainer", options) + th.CheckEquals(t, "bar", res.Header["X-Container-Meta-Foo"][0]) + + expected := &containers.CreateHeader{ + ContentLength: 0, + ContentType: "text/html; charset=UTF-8", + Date: time.Date(2016, time.August, 17, 19, 25, 43, 0, loc), //Wed, 17 Aug 2016 19:25:43 GMT + TransID: "tx554ed59667a64c61866f1-0058b4ba37", + } + actual, err := res.Extract() + th.CheckNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} + +func TestDeleteContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteContainerSuccessfully(t) + + res := containers.Delete(fake.ServiceClient(), "testContainer") + th.CheckNoErr(t, res.Err) +} + +func TestUpdateContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateContainerSuccessfully(t) + + options := &containers.UpdateOpts{Metadata: map[string]string{"foo": "bar"}} + res := containers.Update(fake.ServiceClient(), "testContainer", options) + th.CheckNoErr(t, res.Err) +} + +func TestGetContainer(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetContainerSuccessfully(t) + + getOpts := containers.GetOpts{ + Newest: true, + } + res := containers.Get(fake.ServiceClient(), "testContainer", getOpts) + _, err := res.ExtractMetadata() + th.CheckNoErr(t, err) + + expected := &containers.GetHeader{ + AcceptRanges: "bytes", + BytesUsed: 100, + ContentType: "application/json; charset=utf-8", + Date: time.Date(2016, time.August, 17, 19, 25, 43, 0, loc), //Wed, 17 Aug 2016 19:25:43 GMT + ObjectCount: 4, + Read: []string{"test"}, + TransID: "tx554ed59667a64c61866f1-0057b4ba37", + Write: []string{"test2", "user4"}, + StoragePolicy: "test_policy", + } + actual, err := res.Extract() + th.CheckNoErr(t, err) + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go new file mode 100644 index 000000000000..9b380470dd78 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/containers/urls.go @@ -0,0 +1,23 @@ +package containers + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} + +func createURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func getURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func deleteURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} + +func updateURL(c *gophercloud.ServiceClient, container string) string { + return createURL(c, container) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go new file mode 100644 index 000000000000..e9b4b8a9f321 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/doc.go @@ -0,0 +1,106 @@ +/* +Package objects contains functionality for working with Object Storage +object resources. 
An object is a resource that represents and contains data +- such as documents, images, and so on. You can also store custom metadata +with an object. + +Note: When referencing the Object Storage API docs, some of the API actions +are listed under "containers" rather than "objects". This was an intentional +design in Gophercloud to make some object actions feel more natural. + +Example to List Objects + + containerName := "my_container" + + listOpts := objects.ListOpts{ + Full: true, + } + + allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages() + if err != nil { + panic(err) + } + + allObjects, err := objects.ExtractInfo(allPages) + if err != nil { + panic(err) + } + + for _, object := range allObjects { + fmt.Printf("%+v\n", object) + } + +Example to List Object Names + + containerName := "my_container" + + listOpts := objects.ListOpts{ + Full: false, + } + + allPages, err := objects.List(objectStorageClient, containerName, listOpts).AllPages() + if err != nil { + panic(err) + } + + allObjects, err := objects.ExtractNames(allPages) + if err != nil { + panic(err) + } + + for _, object := range allObjects { + fmt.Printf("%+v\n", object) + } + +Example to Create an Object + + content := "some object content" + objectName := "my_object" + containerName := "my_container" + + createOpts := objects.CreateOpts{ + ContentType: "text/plain" + Content: strings.NewReader(content), + } + + object, err := objects.Create(objectStorageClient, containerName, objectName, createOpts).Extract() + if err != nil { + panic(err) + } + +Example to Copy an Object + + objectName := "my_object" + containerName := "my_container" + + copyOpts := objects.CopyOpts{ + Destination: "/newContainer/newObject", + } + + object, err := objects.Copy(objectStorageClient, containerName, objectName, copyOpts).Extract() + if err != nil { + panic(err) + } + +Example to Delete an Object + + objectName := "my_object" + containerName := "my_container" + + object, err := objects.Delete(objectStorageClient, containerName, objectName).Extract() + if err != nil { + panic(err) + } + +Example to Download an Object's Data + + objectName := "my_object" + containerName := "my_container" + + object := objects.Download(objectStorageClient, containerName, objectName, nil) + content, err := object.ExtractContent() + if err != nil { + panic(err) + } +*/ +package objects diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go new file mode 100644 index 000000000000..5c4ae44d3176 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/errors.go @@ -0,0 +1,13 @@ +package objects + +import "github.com/gophercloud/gophercloud" + +// ErrWrongChecksum is the error when the checksum generated for an object +// doesn't match the ETAG header. 
+type ErrWrongChecksum struct { + gophercloud.BaseError +} + +func (e ErrWrongChecksum) Error() string { + return "Local checksum does not match API ETag header" +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go new file mode 100644 index 000000000000..7325cd7d0b9b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/requests.go @@ -0,0 +1,499 @@ +package objects + +import ( + "bytes" + "crypto/hmac" + "crypto/md5" + "crypto/sha1" + "fmt" + "io" + "io/ioutil" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/accounts" + "github.com/gophercloud/gophercloud/pagination" +) + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToObjectListParams() (bool, string, error) +} + +// ListOpts is a structure that holds parameters for listing objects. +type ListOpts struct { + // Full is a true/false value that represents the amount of object information + // returned. If Full is set to true, then the content-type, number of bytes, + // hash date last modified, and name are returned. If set to false or not set, + // then only the object names are returned. + Full bool + Limit int `q:"limit"` + Marker string `q:"marker"` + EndMarker string `q:"end_marker"` + Format string `q:"format"` + Prefix string `q:"prefix"` + Delimiter string `q:"delimiter"` + Path string `q:"path"` +} + +// ToObjectListParams formats a ListOpts into a query string and boolean +// representing whether to list complete information for each object. +func (opts ListOpts) ToObjectListParams() (bool, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Full, q.String(), err +} + +// List is a function that retrieves all objects in a container. It also returns +// the details for the container. To extract only the object information or names, +// pass the ListResult response to the ExtractInfo or ExtractNames function, +// respectively. +func List(c *gophercloud.ServiceClient, containerName string, opts ListOptsBuilder) pagination.Pager { + headers := map[string]string{"Accept": "text/plain", "Content-Type": "text/plain"} + + url := listURL(c, containerName) + if opts != nil { + full, query, err := opts.ToObjectListParams() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + + if full { + headers = map[string]string{"Accept": "application/json", "Content-Type": "application/json"} + } + } + + pager := pagination.NewPager(c, url, func(r pagination.PageResult) pagination.Page { + p := ObjectPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) + pager.Headers = headers + return pager +} + +// DownloadOptsBuilder allows extensions to add additional parameters to the +// Download request. +type DownloadOptsBuilder interface { + ToObjectDownloadParams() (map[string]string, string, error) +} + +// DownloadOpts is a structure that holds parameters for downloading an object. 
+type DownloadOpts struct { + IfMatch string `h:"If-Match"` + IfModifiedSince time.Time `h:"If-Modified-Since"` + IfNoneMatch string `h:"If-None-Match"` + IfUnmodifiedSince time.Time `h:"If-Unmodified-Since"` + Newest bool `h:"X-Newest"` + Range string `h:"Range"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectDownloadParams formats a DownloadOpts into a query string and map of +// headers. +func (opts DownloadOpts) ToObjectDownloadParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Download is a function that retrieves the content and metadata for an object. +// To extract just the content, pass the DownloadResult response to the +// ExtractContent function. +func Download(c *gophercloud.ServiceClient, containerName, objectName string, opts DownloadOptsBuilder) (r DownloadResult) { + url := downloadURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectDownloadParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 206, 304}, + }) + if resp != nil { + r.Header = resp.Header + r.Body = resp.Body + } + r.Err = err + return +} + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToObjectCreateParams() (io.Reader, map[string]string, string, error) +} + +// CreateOpts is a structure that holds parameters for creating an object. +type CreateOpts struct { + Content io.Reader + Metadata map[string]string + NoETag bool + CacheControl string `h:"Cache-Control"` + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentLength int64 `h:"Content-Length"` + ContentType string `h:"Content-Type"` + CopyFrom string `h:"X-Copy-From"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType string `h:"X-Detect-Content-Type"` + ETag string `h:"ETag"` + IfNoneMatch string `h:"If-None-Match"` + ObjectManifest string `h:"X-Object-Manifest"` + TransferEncoding string `h:"Transfer-Encoding"` + Expires string `q:"expires"` + MultipartManifest string `q:"multipart-manifest"` + Signature string `q:"signature"` +} + +// ToObjectCreateParams formats a CreateOpts into a query string and map of +// headers. +func (opts CreateOpts) ToObjectCreateParams() (io.Reader, map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, nil, "", err + } + + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + + if opts.NoETag { + delete(h, "etag") + return opts.Content, h, q.String(), nil + } + + if h["ETag"] != "" { + return opts.Content, h, q.String(), nil + } + + // When we're dealing with big files an io.ReadSeeker allows us to efficiently calculate + // the md5 sum. An io.Reader is only readable once which means we have to copy the entire + // file content into memory first. 
+ readSeeker, isReadSeeker := opts.Content.(io.ReadSeeker) + if !isReadSeeker { + data, err := ioutil.ReadAll(opts.Content) + if err != nil { + return nil, nil, "", err + } + readSeeker = bytes.NewReader(data) + } + + hash := md5.New() + // io.Copy into md5 is very efficient as it's done in small chunks. + if _, err := io.Copy(hash, readSeeker); err != nil { + return nil, nil, "", err + } + readSeeker.Seek(0, io.SeekStart) + + h["ETag"] = fmt.Sprintf("%x", hash.Sum(nil)) + + return readSeeker, h, q.String(), nil +} + +// Create is a function that creates a new object or replaces an existing +// object. If the returned response's ETag header fails to match the local +// checksum, the failed request will automatically be retried up to a maximum +// of 3 times. +func Create(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateOptsBuilder) (r CreateResult) { + url := createURL(c, containerName, objectName) + h := make(map[string]string) + var b io.Reader + if opts != nil { + tmpB, headers, query, err := opts.ToObjectCreateParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + b = tmpB + } + + resp, err := c.Put(url, nil, nil, &gophercloud.RequestOpts{ + RawBody: b, + MoreHeaders: h, + }) + r.Err = err + if resp != nil { + r.Header = resp.Header + } + return +} + +// CopyOptsBuilder allows extensions to add additional parameters to the +// Copy request. +type CopyOptsBuilder interface { + ToObjectCopyMap() (map[string]string, error) +} + +// CopyOpts is a structure that holds parameters for copying one object to +// another. +type CopyOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + Destination string `h:"Destination" required:"true"` +} + +// ToObjectCopyMap formats a CopyOpts into a map of headers. +func (opts CopyOpts) ToObjectCopyMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Copy is a function that copies one object to another. +func Copy(c *gophercloud.ServiceClient, containerName, objectName string, opts CopyOptsBuilder) (r CopyResult) { + h := make(map[string]string) + headers, err := opts.ToObjectCopyMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + + url := copyURL(c, containerName, objectName) + resp, err := c.Request("COPY", url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{201}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// DeleteOptsBuilder allows extensions to add additional parameters to the +// Delete request. +type DeleteOptsBuilder interface { + ToObjectDeleteQuery() (string, error) +} + +// DeleteOpts is a structure that holds parameters for deleting an object. +type DeleteOpts struct { + MultipartManifest string `q:"multipart-manifest"` +} + +// ToObjectDeleteQuery formats a DeleteOpts into a query string. +func (opts DeleteOpts) ToObjectDeleteQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// Delete is a function that deletes an object. 
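A minimal sketch of the checksum behaviour described above, assuming the usual objectStorageClient placeholder; the file path and container name are illustrative. Passing an io.ReadSeeker (here an *os.File) lets the ETag be computed without buffering the whole payload, while NoETag: true would skip the local checksum entirely:

    f, err := os.Open("report.csv")
    if err != nil {
        panic(err)
    }
    defer f.Close()

    createOpts := objects.CreateOpts{
        ContentType: "text/csv",
        Content:     f, // *os.File implements io.ReadSeeker, so the body is hashed in place
    }
    if _, err := objects.Create(objectStorageClient, "my_container", "report.csv", createOpts).Extract(); err != nil {
        panic(err)
    }
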
+func Delete(c *gophercloud.ServiceClient, containerName, objectName string, opts DeleteOptsBuilder) (r DeleteResult) { + url := deleteURL(c, containerName, objectName) + if opts != nil { + query, err := opts.ToObjectDeleteQuery() + if err != nil { + r.Err = err + return + } + url += query + } + resp, err := c.Delete(url, nil) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// GetOptsBuilder allows extensions to add additional parameters to the +// Get request. +type GetOptsBuilder interface { + ToObjectGetParams() (map[string]string, string, error) +} + +// GetOpts is a structure that holds parameters for getting an object's +// metadata. +type GetOpts struct { + Newest bool `h:"X-Newest"` + Expires string `q:"expires"` + Signature string `q:"signature"` +} + +// ToObjectGetParams formats a GetOpts into a query string and a map of headers. +func (opts GetOpts) ToObjectGetParams() (map[string]string, string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return nil, "", err + } + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, q.String(), err + } + return h, q.String(), nil +} + +// Get is a function that retrieves the metadata of an object. To extract just +// the custom metadata, pass the GetResult response to the ExtractMetadata +// function. +func Get(c *gophercloud.ServiceClient, containerName, objectName string, opts GetOptsBuilder) (r GetResult) { + url := getURL(c, containerName, objectName) + h := make(map[string]string) + if opts != nil { + headers, query, err := opts.ToObjectGetParams() + if err != nil { + r.Err = err + return + } + for k, v := range headers { + h[k] = v + } + url += query + } + + resp, err := c.Head(url, &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200, 204}, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToObjectUpdateMap() (map[string]string, error) +} + +// UpdateOpts is a structure that holds parameters for updating, creating, or +// deleting an object's metadata. +type UpdateOpts struct { + Metadata map[string]string + ContentDisposition string `h:"Content-Disposition"` + ContentEncoding string `h:"Content-Encoding"` + ContentType string `h:"Content-Type"` + DeleteAfter int `h:"X-Delete-After"` + DeleteAt int `h:"X-Delete-At"` + DetectContentType bool `h:"X-Detect-Content-Type"` +} + +// ToObjectUpdateMap formats a UpdateOpts into a map of headers. +func (opts UpdateOpts) ToObjectUpdateMap() (map[string]string, error) { + h, err := gophercloud.BuildHeaders(opts) + if err != nil { + return nil, err + } + for k, v := range opts.Metadata { + h["X-Object-Meta-"+k] = v + } + return h, nil +} + +// Update is a function that creates, updates, or deletes an object's metadata. +func Update(c *gophercloud.ServiceClient, containerName, objectName string, opts UpdateOptsBuilder) (r UpdateResult) { + h := make(map[string]string) + if opts != nil { + headers, err := opts.ToObjectUpdateMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + url := updateURL(c, containerName, objectName) + resp, err := c.Post(url, nil, nil, &gophercloud.RequestOpts{ + MoreHeaders: h, + }) + if resp != nil { + r.Header = resp.Header + } + r.Err = err + return +} + +// HTTPMethod represents an HTTP method string (e.g. "GET"). 
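A minimal sketch of the object Update and Get calls above, again assuming the objectStorageClient placeholder; metadata keys are sent as X-Object-Meta-* headers and read back with ExtractMetadata:

    updateOpts := objects.UpdateOpts{
        Metadata: map[string]string{"owner": "data-team"},
    }
    if res := objects.Update(objectStorageClient, "my_container", "my_object", updateOpts); res.Err != nil {
        panic(res.Err)
    }

    metadata, err := objects.Get(objectStorageClient, "my_container", "my_object", nil).ExtractMetadata()
    if err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", metadata)
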
+type HTTPMethod string + +var ( + // GET represents an HTTP "GET" method. + GET HTTPMethod = "GET" + + // POST represents an HTTP "POST" method. + POST HTTPMethod = "POST" +) + +// CreateTempURLOpts are options for creating a temporary URL for an object. +type CreateTempURLOpts struct { + // (REQUIRED) Method is the HTTP method to allow for users of the temp URL. + // Valid values are "GET" and "POST". + Method HTTPMethod + + // (REQUIRED) TTL is the number of seconds the temp URL should be active. + TTL int + + // (Optional) Split is the string on which to split the object URL. Since only + // the object path is used in the hash, the object URL needs to be parsed. If + // empty, the default OpenStack URL split point will be used ("/v1/"). + Split string +} + +// CreateTempURL is a function for creating a temporary URL for an object. It +// allows users to have "GET" or "POST" access to a particular tenant's object +// for a limited amount of time. +func CreateTempURL(c *gophercloud.ServiceClient, containerName, objectName string, opts CreateTempURLOpts) (string, error) { + if opts.Split == "" { + opts.Split = "/v1/" + } + duration := time.Duration(opts.TTL) * time.Second + expiry := time.Now().Add(duration).Unix() + getHeader, err := accounts.Get(c, nil).Extract() + if err != nil { + return "", err + } + secretKey := []byte(getHeader.TempURLKey) + url := getURL(c, containerName, objectName) + splitPath := strings.Split(url, opts.Split) + baseURL, objectPath := splitPath[0], splitPath[1] + objectPath = opts.Split + objectPath + body := fmt.Sprintf("%s\n%d\n%s", opts.Method, expiry, objectPath) + hash := hmac.New(sha1.New, secretKey) + hash.Write([]byte(body)) + hexsum := fmt.Sprintf("%x", hash.Sum(nil)) + return fmt.Sprintf("%s%s?temp_url_sig=%s&temp_url_expires=%d", baseURL, objectPath, hexsum, expiry), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go new file mode 100644 index 000000000000..dd7c7044d0e3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/results.go @@ -0,0 +1,580 @@ +package objects + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "strconv" + "strings" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Object is a structure that holds information related to a storage object. +type Object struct { + // Bytes is the total number of bytes that comprise the object. + Bytes int64 `json:"bytes"` + + // ContentType is the content type of the object. + ContentType string `json:"content_type"` + + // Hash represents the MD5 checksum value of the object's content. + Hash string `json:"hash"` + + // LastModified is the time the object was last modified. + LastModified time.Time `json:"-"` + + // Name is the unique name for the object. + Name string `json:"name"` + + // Subdir denotes if the result contains a subdir. 
+ Subdir string `json:"subdir"` +} + +func (r *Object) UnmarshalJSON(b []byte) error { + type tmp Object + var s *struct { + tmp + LastModified string `json:"last_modified"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Object(s.tmp) + + if s.LastModified != "" { + t, err := time.Parse(gophercloud.RFC3339MilliNoZ, s.LastModified) + if err != nil { + t, err = time.Parse(gophercloud.RFC3339Milli, s.LastModified) + if err != nil { + return err + } + } + r.LastModified = t + } + + return nil +} + +// ObjectPage is a single page of objects that is returned from a call to the +// List function. +type ObjectPage struct { + pagination.MarkerPageBase +} + +// IsEmpty returns true if a ListResult contains no object names. +func (r ObjectPage) IsEmpty() (bool, error) { + names, err := ExtractNames(r) + return len(names) == 0, err +} + +// LastMarker returns the last object name in a ListResult. +func (r ObjectPage) LastMarker() (string, error) { + return extractLastMarker(r) +} + +// ExtractInfo is a function that takes a page of objects and returns their +// full information. +func ExtractInfo(r pagination.Page) ([]Object, error) { + var s []Object + err := (r.(ObjectPage)).ExtractInto(&s) + return s, err +} + +// ExtractNames is a function that takes a page of objects and returns only +// their names. +func ExtractNames(r pagination.Page) ([]string, error) { + casted := r.(ObjectPage) + ct := casted.Header.Get("Content-Type") + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(r) + if err != nil { + return nil, err + } + + names := make([]string, 0, len(parsed)) + for _, object := range parsed { + if object.Subdir != "" { + names = append(names, object.Subdir) + } else { + names = append(names, object.Name) + } + } + + return names, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(r.(ObjectPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names, nil + case strings.HasPrefix(ct, "text/html"): + return []string{}, nil + default: + return nil, fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} + +// DownloadHeader represents the headers returned in the response from a +// Download request. 
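A minimal sketch of the Subdir handling described in ExtractNames above, assuming the objectStorageClient placeholder; with a delimiter set and Full: true, pseudo-directory entries are returned alongside regular object names:

    listOpts := objects.ListOpts{
        Full:      true,
        Delimiter: "/",
        Prefix:    "photos/",
    }

    allPages, err := objects.List(objectStorageClient, "my_container", listOpts).AllPages()
    if err != nil {
        panic(err)
    }

    // Entries ending in the delimiter come from the Subdir field.
    names, err := objects.ExtractNames(allPages)
    if err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", names)
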
+type DownloadHeader struct { + AcceptRanges string `json:"Accept-Ranges"` + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DownloadHeader) UnmarshalJSON(b []byte) error { + type tmp DownloadHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + StaticLargeObject interface{} `json:"X-Static-Large-Object"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DownloadHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch t := s.StaticLargeObject.(type) { + case string: + if t == "True" || t == "true" { + r.StaticLargeObject = true + } + case bool: + r.StaticLargeObject = t + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// DownloadResult is a *http.Response that is returned from a call to the +// Download function. +type DownloadResult struct { + gophercloud.HeaderResult + Body io.ReadCloser +} + +// Extract will return a struct of headers returned from a call to Download. +func (r DownloadResult) Extract() (*DownloadHeader, error) { + var s *DownloadHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractContent is a function that takes a DownloadResult's io.Reader body +// and reads all available data into a slice of bytes. Please be aware that due +// the nature of io.Reader is forward-only - meaning that it can only be read +// once and not rewound. You can recreate a reader from the output of this +// function by using bytes.NewReader(downloadBytes) +func (r *DownloadResult) ExtractContent() ([]byte, error) { + if r.Err != nil { + return nil, r.Err + } + defer r.Body.Close() + body, err := ioutil.ReadAll(r.Body) + if err != nil { + return nil, err + } + r.Body.Close() + return body, nil +} + +// GetHeader represents the headers returned in the response from a Get request. 
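A minimal sketch of the forward-only behaviour noted for ExtractContent above, assuming the objectStorageClient placeholder; the returned bytes can be wrapped in bytes.NewReader whenever a reusable reader is needed:

    downloadResult := objects.Download(objectStorageClient, "my_container", "my_object", nil)
    content, err := downloadResult.ExtractContent()
    if err != nil {
        panic(err)
    }

    // The HTTP body has already been consumed; re-read the data via a fresh reader.
    reader := bytes.NewReader(content)
    fmt.Println(reader.Len())
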
+type GetHeader struct { + ContentDisposition string `json:"Content-Disposition"` + ContentEncoding string `json:"Content-Encoding"` + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + DeleteAt time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + ObjectManifest string `json:"X-Object-Manifest"` + StaticLargeObject bool `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *GetHeader) UnmarshalJSON(b []byte) error { + type tmp GetHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + DeleteAt gophercloud.JSONUnix `json:"X-Delete-At"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + StaticLargeObject interface{} `json:"X-Static-Large-Object"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = GetHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + switch t := s.StaticLargeObject.(type) { + case string: + if t == "True" || t == "true" { + r.StaticLargeObject = true + } + case bool: + r.StaticLargeObject = t + } + + r.Date = time.Time(s.Date) + r.DeleteAt = time.Time(s.DeleteAt) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// GetResult is a *http.Response that is returned from a call to the Get +// function. +type GetResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Get. +func (r GetResult) Extract() (*GetHeader, error) { + var s *GetHeader + err := r.ExtractInto(&s) + return s, err +} + +// ExtractMetadata is a function that takes a GetResult (of type *http.Response) +// and returns the custom metadata associated with the object. +func (r GetResult) ExtractMetadata() (map[string]string, error) { + if r.Err != nil { + return nil, r.Err + } + metadata := make(map[string]string) + for k, v := range r.Header { + if strings.HasPrefix(k, "X-Object-Meta-") { + key := strings.TrimPrefix(k, "X-Object-Meta-") + metadata[key] = v[0] + } + } + return metadata, nil +} + +// CreateHeader represents the headers returned in the response from a +// Create request. +type CreateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CreateHeader) UnmarshalJSON(b []byte) error { + type tmp CreateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CreateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CreateResult represents the result of a create operation. +type CreateResult struct { + checksum string + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Create. 
+func (r CreateResult) Extract() (*CreateHeader, error) { + //if r.Header.Get("ETag") != fmt.Sprintf("%x", localChecksum) { + // return nil, ErrWrongChecksum{} + //} + var s *CreateHeader + err := r.ExtractInto(&s) + return s, err +} + +// UpdateHeader represents the headers returned in the response from a +// Update request. +type UpdateHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *UpdateHeader) UnmarshalJSON(b []byte) error { + type tmp UpdateHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = UpdateHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// UpdateResult represents the result of an update operation. +type UpdateResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Update. +func (r UpdateResult) Extract() (*UpdateHeader, error) { + var s *UpdateHeader + err := r.ExtractInto(&s) + return s, err +} + +// DeleteHeader represents the headers returned in the response from a +// Delete request. +type DeleteHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + Date time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *DeleteHeader) UnmarshalJSON(b []byte) error { + type tmp DeleteHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + Date gophercloud.JSONRFC1123 `json:"Date"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = DeleteHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + + return nil +} + +// DeleteResult represents the result of a delete operation. +type DeleteResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Delete. +func (r DeleteResult) Extract() (*DeleteHeader, error) { + var s *DeleteHeader + err := r.ExtractInto(&s) + return s, err +} + +// CopyHeader represents the headers returned in the response from a +// Copy request. 
+type CopyHeader struct { + ContentLength int64 `json:"-"` + ContentType string `json:"Content-Type"` + CopiedFrom string `json:"X-Copied-From"` + CopiedFromLastModified time.Time `json:"-"` + Date time.Time `json:"-"` + ETag string `json:"Etag"` + LastModified time.Time `json:"-"` + TransID string `json:"X-Trans-Id"` +} + +func (r *CopyHeader) UnmarshalJSON(b []byte) error { + type tmp CopyHeader + var s struct { + tmp + ContentLength string `json:"Content-Length"` + CopiedFromLastModified gophercloud.JSONRFC1123 `json:"X-Copied-From-Last-Modified"` + Date gophercloud.JSONRFC1123 `json:"Date"` + LastModified gophercloud.JSONRFC1123 `json:"Last-Modified"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = CopyHeader(s.tmp) + + switch s.ContentLength { + case "": + r.ContentLength = 0 + default: + r.ContentLength, err = strconv.ParseInt(s.ContentLength, 10, 64) + if err != nil { + return err + } + } + + r.Date = time.Time(s.Date) + r.CopiedFromLastModified = time.Time(s.CopiedFromLastModified) + r.LastModified = time.Time(s.LastModified) + + return nil +} + +// CopyResult represents the result of a copy operation. +type CopyResult struct { + gophercloud.HeaderResult +} + +// Extract will return a struct of headers returned from a call to Copy. +func (r CopyResult) Extract() (*CopyHeader, error) { + var s *CopyHeader + err := r.ExtractInto(&s) + return s, err +} + +// extractLastMarker is a function that takes a page of objects and returns the +// marker for the page. This can either be a subdir or the last object's name. +func extractLastMarker(r pagination.Page) (string, error) { + casted := r.(ObjectPage) + + // If a delimiter was requested, check if a subdir exists. + queryParams, err := url.ParseQuery(casted.URL.RawQuery) + if err != nil { + return "", err + } + + var delimeter bool + if v, ok := queryParams["delimiter"]; ok && len(v) > 0 { + delimeter = true + } + + ct := casted.Header.Get("Content-Type") + switch { + case strings.HasPrefix(ct, "application/json"): + parsed, err := ExtractInfo(r) + if err != nil { + return "", err + } + + var lastObject Object + if len(parsed) > 0 { + lastObject = parsed[len(parsed)-1] + } + + if !delimeter { + return lastObject.Name, nil + } + + if lastObject.Name != "" { + return lastObject.Name, nil + } + + return lastObject.Subdir, nil + case strings.HasPrefix(ct, "text/plain"): + names := make([]string, 0, 50) + + body := string(r.(ObjectPage).Body.([]uint8)) + for _, name := range strings.Split(body, "\n") { + if len(name) > 0 { + names = append(names, name) + } + } + + return names[len(names)-1], err + case strings.HasPrefix(ct, "text/html"): + return "", nil + default: + return "", fmt.Errorf("Cannot extract names from response with content-type: [%s]", ct) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/doc.go new file mode 100644 index 000000000000..9ca1d8a11a7e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/doc.go @@ -0,0 +1,2 @@ +// objects unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/fixtures.go new file mode 100644 index 000000000000..0ede1d6201f5 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/fixtures.go @@ -0,0 +1,250 @@ +package testing + +import ( + "crypto/md5" + "fmt" + "io" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// HandleDownloadObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Download` response. +func HandleDownloadObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.Header().Set("Date", "Wed, 10 Nov 2009 23:00:00 GMT") + w.Header().Set("X-Static-Large-Object", "True") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Successful download with Gophercloud") + }) +} + +// ExpectedListInfo is the result expected from a call to `List` when full +// info is requested. +var ExpectedListInfo = []objects.Object{ + { + Hash: "451e372e48e0f6b1114fa0724aa79fa1", + LastModified: time.Date(2016, time.August, 17, 22, 11, 58, 602650000, time.UTC), //"2016-08-17T22:11:58.602650" + Bytes: 14, + Name: "goodbye", + ContentType: "application/octet-stream", + }, + { + Hash: "451e372e48e0f6b1114fa0724aa79fa1", + LastModified: time.Date(2016, time.August, 17, 22, 11, 58, 602650000, time.UTC), + Bytes: 14, + Name: "hello", + ContentType: "application/octet-stream", + }, +} + +// ExpectedListSubdir is the result expected from a call to `List` when full +// info is requested. +var ExpectedListSubdir = []objects.Object{ + { + Subdir: "directory/", + }, +} + +// ExpectedListNames is the result expected from a call to `List` when just +// object names are requested. +var ExpectedListNames = []string{"hello", "goodbye"} + +// HandleListObjectsInfoSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `List` response when full info is requested. +func HandleListObjectsInfoSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, `[ + { + "hash": "451e372e48e0f6b1114fa0724aa79fa1", + "last_modified": "2016-08-17T22:11:58.602650", + "bytes": 14, + "name": "goodbye", + "content_type": "application/octet-stream" + }, + { + "hash": "451e372e48e0f6b1114fa0724aa79fa1", + "last_modified": "2016-08-17T22:11:58.602650", + "bytes": 14, + "name": "hello", + "content_type": "application/octet-stream" + } + ]`) + case "hello": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleListSubdirSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `List` response when full info is requested. 
+func HandleListSubdirSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, `[ + { + "subdir": "directory/" + } + ]`) + case "directory/": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleListObjectNamesSuccessfully creates an HTTP handler at `/testContainer` on the test handler mux that +// responds with a `List` response when only object names are requested. +func HandleListObjectNamesSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "text/plain") + + w.Header().Set("Content-Type", "text/plain") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, "hello\ngoodbye\n") + case "goodbye": + fmt.Fprintf(w, "") + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// HandleCreateTextObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux +// that responds with a `Create` response. A Content-Type of "text/plain" is expected. +func HandleCreateTextObjectSuccessfully(t *testing.T, content string) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "text/plain") + th.TestHeader(t, r, "Accept", "application/json") + + hash := md5.New() + io.WriteString(hash, content) + localChecksum := hash.Sum(nil) + + w.Header().Set("ETag", fmt.Sprintf("%x", localChecksum)) + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleCreateTextWithCacheControlSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler +// mux that responds with a `Create` response. A Cache-Control of `max-age="3600", public` is expected. +func HandleCreateTextWithCacheControlSuccessfully(t *testing.T, content string) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Cache-Control", `max-age="3600", public`) + th.TestHeader(t, r, "Accept", "application/json") + + hash := md5.New() + io.WriteString(hash, content) + localChecksum := hash.Sum(nil) + + w.Header().Set("ETag", fmt.Sprintf("%x", localChecksum)) + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleCreateTypelessObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler +// mux that responds with a `Create` response. No Content-Type header may be present in the request, so that server- +// side content-type detection will be triggered properly. 
+func HandleCreateTypelessObjectSuccessfully(t *testing.T, content string) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + if contentType, present := r.Header["Content-Type"]; present { + t.Errorf("Expected Content-Type header to be omitted, but was %#v", contentType) + } + + hash := md5.New() + io.WriteString(hash, content) + localChecksum := hash.Sum(nil) + + w.Header().Set("ETag", fmt.Sprintf("%x", localChecksum)) + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleCopyObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Copy` response. +func HandleCopyObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "COPY") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "Destination", "/newTestContainer/newTestObject") + w.WriteHeader(http.StatusCreated) + }) +} + +// HandleDeleteObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Delete` response. +func HandleDeleteObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// HandleUpdateObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Update` response. +func HandleUpdateObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + th.TestHeader(t, r, "X-Object-Meta-Gophercloud-Test", "objects") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleGetObjectSuccessfully creates an HTTP handler at `/testContainer/testObject` on the test handler mux that +// responds with a `Get` response. 
+func HandleGetObjectSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "HEAD") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.Header().Add("X-Object-Meta-Gophercloud-Test", "objects") + w.Header().Add("X-Static-Large-Object", "true") + w.WriteHeader(http.StatusNoContent) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/requests_test.go new file mode 100644 index 000000000000..c5b34a75b0da --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/testing/requests_test.go @@ -0,0 +1,312 @@ +package testing + +import ( + "bytes" + "crypto/md5" + "fmt" + "io" + "io/ioutil" + "strings" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +var ( + loc, _ = time.LoadLocation("GMT") +) + +func TestDownloadReader(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDownloadObjectSuccessfully(t) + + response := objects.Download(fake.ServiceClient(), "testContainer", "testObject", nil) + defer response.Body.Close() + + // Check reader + buf := bytes.NewBuffer(make([]byte, 0)) + io.CopyN(buf, response.Body, 10) + th.CheckEquals(t, "Successful", string(buf.Bytes())) +} + +func TestDownloadExtraction(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDownloadObjectSuccessfully(t) + + response := objects.Download(fake.ServiceClient(), "testContainer", "testObject", nil) + + // Check []byte extraction + bytes, err := response.ExtractContent() + th.AssertNoErr(t, err) + th.CheckEquals(t, "Successful download with Gophercloud", string(bytes)) + + expected := &objects.DownloadHeader{ + ContentLength: 36, + ContentType: "text/plain; charset=utf-8", + Date: time.Date(2009, time.November, 10, 23, 0, 0, 0, loc), + StaticLargeObject: true, + } + actual, err := response.Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) +} + +func TestListObjectInfo(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListObjectsInfoSuccessfully(t) + + count := 0 + options := &objects.ListOpts{Full: true} + err := objects.List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := objects.ExtractInfo(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedListInfo, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListObjectSubdir(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSubdirSuccessfully(t) + + count := 0 + options := &objects.ListOpts{Full: true, Prefix: "", Delimiter: "/"} + err := objects.List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := objects.ExtractInfo(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ExpectedListSubdir, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListObjectNames(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListObjectNamesSuccessfully(t) + + // 
Check without delimiter. + count := 0 + options := &objects.ListOpts{Full: false} + err := objects.List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := objects.ExtractNames(page) + if err != nil { + t.Errorf("Failed to extract container names: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) + + // Check with delimiter. + count = 0 + options = &objects.ListOpts{Full: false, Delimiter: "/"} + err = objects.List(fake.ServiceClient(), "testContainer", options).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := objects.ExtractNames(page) + if err != nil { + t.Errorf("Failed to extract container names: %v", err) + return false, err + } + + th.CheckDeepEquals(t, ExpectedListNames, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestCreateObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + content := "Did gyre and gimble in the wabe" + + HandleCreateTextObjectSuccessfully(t, content) + + options := &objects.CreateOpts{ContentType: "text/plain", Content: strings.NewReader(content)} + res := objects.Create(fake.ServiceClient(), "testContainer", "testObject", options) + th.AssertNoErr(t, res.Err) +} + +func TestCreateObjectWithCacheControl(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + content := "All mimsy were the borogoves" + + HandleCreateTextWithCacheControlSuccessfully(t, content) + + options := &objects.CreateOpts{ + CacheControl: `max-age="3600", public`, + Content: strings.NewReader(content), + } + res := objects.Create(fake.ServiceClient(), "testContainer", "testObject", options) + th.AssertNoErr(t, res.Err) +} + +func TestCreateObjectWithoutContentType(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + content := "The sky was the color of television, tuned to a dead channel." 
+ + HandleCreateTypelessObjectSuccessfully(t, content) + + res := objects.Create(fake.ServiceClient(), "testContainer", "testObject", &objects.CreateOpts{Content: strings.NewReader(content)}) + th.AssertNoErr(t, res.Err) +} + +/* +func TestErrorIsRaisedForChecksumMismatch(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/testContainer/testObject", func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("ETag", "acbd18db4cc2f85cedef654fccc4a4d8") + w.WriteHeader(http.StatusCreated) + }) + + content := strings.NewReader("The sky was the color of television, tuned to a dead channel.") + res := Create(fake.ServiceClient(), "testContainer", "testObject", &CreateOpts{Content: content}) + + err := fmt.Errorf("Local checksum does not match API ETag header") + th.AssertDeepEquals(t, err, res.Err) +} +*/ + +func TestCopyObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCopyObjectSuccessfully(t) + + options := &objects.CopyOpts{Destination: "/newTestContainer/newTestObject"} + res := objects.Copy(fake.ServiceClient(), "testContainer", "testObject", options) + th.AssertNoErr(t, res.Err) +} + +func TestDeleteObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteObjectSuccessfully(t) + + res := objects.Delete(fake.ServiceClient(), "testContainer", "testObject", nil) + th.AssertNoErr(t, res.Err) +} + +func TestUpateObjectMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateObjectSuccessfully(t) + + options := &objects.UpdateOpts{Metadata: map[string]string{"Gophercloud-Test": "objects"}} + res := objects.Update(fake.ServiceClient(), "testContainer", "testObject", options) + th.AssertNoErr(t, res.Err) +} + +func TestGetObject(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetObjectSuccessfully(t) + + expected := map[string]string{"Gophercloud-Test": "objects"} + actual, err := objects.Get(fake.ServiceClient(), "testContainer", "testObject", nil).ExtractMetadata() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) + + getOpts := objects.GetOpts{ + Newest: true, + } + actualHeaders, err := objects.Get(fake.ServiceClient(), "testContainer", "testObject", getOpts).Extract() + th.AssertNoErr(t, err) + th.AssertEquals(t, actualHeaders.StaticLargeObject, true) +} + +func TestETag(t *testing.T) { + content := "some example object" + createOpts := objects.CreateOpts{ + Content: strings.NewReader(content), + NoETag: true, + } + + _, headers, _, err := createOpts.ToObjectCreateParams() + th.AssertNoErr(t, err) + _, ok := headers["ETag"] + th.AssertEquals(t, ok, false) + + hash := md5.New() + io.WriteString(hash, content) + localChecksum := fmt.Sprintf("%x", hash.Sum(nil)) + + createOpts = objects.CreateOpts{ + Content: strings.NewReader(content), + ETag: localChecksum, + } + + _, headers, _, err = createOpts.ToObjectCreateParams() + th.AssertNoErr(t, err) + th.AssertEquals(t, headers["ETag"], localChecksum) +} + +func TestObjectCreateParamsWithoutSeek(t *testing.T) { + content := "I do not implement Seek()" + buf := bytes.NewBuffer([]byte(content)) + + createOpts := objects.CreateOpts{Content: buf} + reader, headers, _, err := createOpts.ToObjectCreateParams() + + th.AssertNoErr(t, err) + + _, ok := reader.(io.ReadSeeker) + th.AssertEquals(t, ok, true) + + c, err := ioutil.ReadAll(reader) + th.AssertNoErr(t, err) + + th.AssertEquals(t, content, string(c)) + + _, ok = headers["ETag"] + th.AssertEquals(t, true, ok) +} + +func TestObjectCreateParamsWithSeek(t 
*testing.T) { + content := "I implement Seek()" + createOpts := objects.CreateOpts{Content: strings.NewReader(content)} + reader, headers, _, err := createOpts.ToObjectCreateParams() + + th.AssertNoErr(t, err) + + _, ok := reader.(io.ReadSeeker) + th.AssertEquals(t, ok, true) + + c, err := ioutil.ReadAll(reader) + th.AssertNoErr(t, err) + + th.AssertEquals(t, content, string(c)) + + _, ok = headers["ETag"] + th.AssertEquals(t, true, ok) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go new file mode 100644 index 000000000000..b3ac304b7428 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/objects/urls.go @@ -0,0 +1,33 @@ +package objects + +import ( + "github.com/gophercloud/gophercloud" +) + +func listURL(c *gophercloud.ServiceClient, container string) string { + return c.ServiceURL(container) +} + +func copyURL(c *gophercloud.ServiceClient, container, object string) string { + return c.ServiceURL(container, object) +} + +func createURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func getURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func deleteURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func downloadURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} + +func updateURL(c *gophercloud.ServiceClient, container, object string) string { + return copyURL(c, container, object) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go new file mode 100644 index 000000000000..989dc4ece2b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/doc.go @@ -0,0 +1,16 @@ +/* +Package swauth implements Swift's built-in authentication. + +Example to Authenticate with swauth + + authOpts := swauth.AuthOpts{ + User: "project:user", + Key: "password", + } + + swiftClient, err := swauth.NewObjectStorageV1(providerClient, authOpts) + if err != nil { + panic(err) + } +*/ +package swauth diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go new file mode 100644 index 000000000000..29bdcbcf76fe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/requests.go @@ -0,0 +1,70 @@ +package swauth + +import "github.com/gophercloud/gophercloud" + +// AuthOptsBuilder describes struct types that can be accepted by the Auth call. +type AuthOptsBuilder interface { + ToAuthOptsMap() (map[string]string, error) +} + +// AuthOpts specifies an authentication request. +type AuthOpts struct { + // User is an Swauth-based username in username:tenant format. + User string `h:"X-Auth-User" required:"true"` + + // Key is a secret/password to authenticate the User with. + Key string `h:"X-Auth-Key" required:"true"` +} + +// ToAuthOptsMap formats an AuthOpts structure into a request body. +func (opts AuthOpts) ToAuthOptsMap() (map[string]string, error) { + return gophercloud.BuildHeaders(opts) +} + +// Auth performs an authentication request for a Swauth-based user. 
+func Auth(c *gophercloud.ProviderClient, opts AuthOptsBuilder) (r GetAuthResult) { + h := make(map[string]string) + + if opts != nil { + headers, err := opts.ToAuthOptsMap() + if err != nil { + r.Err = err + return + } + + for k, v := range headers { + h[k] = v + } + } + + resp, err := c.Request("GET", getURL(c), &gophercloud.RequestOpts{ + MoreHeaders: h, + OkCodes: []int{200}, + }) + + if resp != nil { + r.Header = resp.Header + } + + r.Err = err + + return r +} + +// NewObjectStorageV1 creates a Swauth-authenticated *gophercloud.ServiceClient +// client that can issue ObjectStorage-based API calls. +func NewObjectStorageV1(pc *gophercloud.ProviderClient, authOpts AuthOpts) (*gophercloud.ServiceClient, error) { + auth, err := Auth(pc, authOpts).Extract() + if err != nil { + return nil, err + } + + swiftClient := &gophercloud.ServiceClient{ + ProviderClient: pc, + Endpoint: gophercloud.NormalizeURL(auth.StorageURL), + } + + swiftClient.TokenID = auth.Token + + return swiftClient, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go new file mode 100644 index 000000000000..f442f4725500 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/results.go @@ -0,0 +1,27 @@ +package swauth + +import ( + "github.com/gophercloud/gophercloud" +) + +// GetAuthResult contains the response from the Auth request. Call its Extract +// method to interpret it as an AuthResult. +type GetAuthResult struct { + gophercloud.HeaderResult +} + +// AuthResult contains the authentication information from a Swauth +// authentication request. +type AuthResult struct { + Token string `json:"X-Auth-Token"` + StorageURL string `json:"X-Storage-Url"` + CDNURL string `json:"X-CDN-Management-Url"` +} + +// Extract is a method that attempts to interpret any Swauth authentication +// response as a AuthResult struct. +func (r GetAuthResult) Extract() (*AuthResult, error) { + var s *AuthResult + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/doc.go new file mode 100644 index 000000000000..2388e8d40592 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/doc.go @@ -0,0 +1,2 @@ +// swauth unit tests +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/fixtures.go new file mode 100644 index 000000000000..79858f5c87b0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/fixtures.go @@ -0,0 +1,29 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// AuthResult is the expected result of AuthOutput +var AuthResult = swauth.AuthResult{ + Token: "AUTH_tk6223e6071f8f4299aa334b48015484a1", + StorageURL: "http://127.0.0.1:8080/v1/AUTH_test/", +} + +// HandleAuthSuccessfully configures the test server to respond to an Auth request. 
+func HandleAuthSuccessfully(t *testing.T, authOpts swauth.AuthOpts) { + th.Mux.HandleFunc("/auth/v1.0", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-User", authOpts.User) + th.TestHeader(t, r, "X-Auth-Key", authOpts.Key) + + w.Header().Add("X-Auth-Token", AuthResult.Token) + w.Header().Add("X-Storage-Url", AuthResult.StorageURL) + fmt.Fprintf(w, "") + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/requests_test.go new file mode 100644 index 000000000000..46571f611737 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/testing/requests_test.go @@ -0,0 +1,35 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack" + "github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestAuth(t *testing.T) { + authOpts := swauth.AuthOpts{ + User: "test:tester", + Key: "testing", + } + + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAuthSuccessfully(t, authOpts) + + providerClient, err := openstack.NewClient(th.Endpoint()) + th.AssertNoErr(t, err) + + swiftClient, err := swauth.NewObjectStorageV1(providerClient, authOpts) + th.AssertNoErr(t, err) + th.AssertEquals(t, swiftClient.TokenID, AuthResult.Token) +} + +func TestBadAuth(t *testing.T) { + authOpts := swauth.AuthOpts{} + _, err := authOpts.ToAuthOptsMap() + if err == nil { + t.Fatalf("Expected an error due to missing auth options") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go new file mode 100644 index 000000000000..a30cabd60e5a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/objectstorage/v1/swauth/urls.go @@ -0,0 +1,7 @@ +package swauth + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ProviderClient) string { + return c.IdentityBase + "auth/v1.0" +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/doc.go new file mode 100644 index 000000000000..f2db622d1fc2 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/doc.go @@ -0,0 +1,4 @@ +// Package apiversions provides information and interaction with the different +// API versions for the OpenStack Heat service. This functionality is not +// restricted to this particular version. 
+package apiversions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/requests.go new file mode 100644 index 000000000000..ff383cff6849 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/requests.go @@ -0,0 +1,13 @@ +package apiversions
+
+import (
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/pagination"
+)
+
+// ListVersions lists all the Heat orchestration API versions available to end-users
+func ListVersions(c *gophercloud.ServiceClient) pagination.Pager {
+ return pagination.NewPager(c, apiVersionsURL(c), func(r pagination.PageResult) pagination.Page {
+ return APIVersionPage{pagination.SinglePageBase(r)}
+ })
+} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/results.go new file mode 100644 index 000000000000..a7c22a2739b5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/results.go @@ -0,0 +1,36 @@ +package apiversions
+
+import (
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/pagination"
+)
+
+// APIVersion represents an API version for the Heat service. It contains the status of
+// the API, and its unique ID.
+type APIVersion struct {
+ Status string `json:"status"`
+ ID string `json:"id"`
+ Links []gophercloud.Link `json:"links"`
+}
+
+// APIVersionPage is the page returned by a pager when traversing over a
+// collection of API versions.
+type APIVersionPage struct {
+ pagination.SinglePageBase
+}
+
+// IsEmpty checks whether an APIVersionPage struct is empty.
+func (r APIVersionPage) IsEmpty() (bool, error) {
+ is, err := ExtractAPIVersions(r)
+ return len(is) == 0, err
+}
+
+// ExtractAPIVersions takes a collection page, extracts all of the elements,
+// and returns them in a slice of APIVersion structs. It is effectively a cast.
+func ExtractAPIVersions(r pagination.Page) ([]APIVersion, error) { + var s struct { + APIVersions []APIVersion `json:"versions"` + } + err := (r.(APIVersionPage)).ExtractInto(&s) + return s.APIVersions, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/testing/doc.go new file mode 100644 index 000000000000..3d545fdda01f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/testing/doc.go @@ -0,0 +1,2 @@ +// orchestration_apiversions_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/testing/requests_test.go new file mode 100644 index 000000000000..ac59b6c6c29c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/testing/requests_test.go @@ -0,0 +1,90 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` +{ + "versions": [ + { + "status": "CURRENT", + "id": "v1.0", + "links": [ + { + "href": "http://23.253.228.211:8000/v1", + "rel": "self" + } + ] + } + ] +}`) + }) + + count := 0 + + apiversions.ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := apiversions.ExtractAPIVersions(page) + if err != nil { + t.Errorf("Failed to extract API versions: %v", err) + return false, err + } + + expected := []apiversions.APIVersion{ + { + Status: "CURRENT", + ID: "v1.0", + Links: []gophercloud.Link{ + gophercloud.Link{ + Href: "http://23.253.228.211:8000/v1", + Rel: "self", + }, + }, + }, + } + + th.AssertDeepEquals(t, expected, actual) + + return true, nil + }) + + if count != 1 { + t.Errorf("Expected 1 page, got %d", count) + } +} + +func TestNonJSONCannotBeExtractedIntoAPIVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + apiversions.ListVersions(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + if _, err := apiversions.ExtractAPIVersions(page); err == nil { + t.Fatalf("Expected error, got nil") + } + return true, nil + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/urls.go new file mode 100644 index 000000000000..0205405a0477 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/apiversions/urls.go @@ -0,0 +1,7 @@ +package apiversions + +import "github.com/gophercloud/gophercloud" + +func apiVersionsURL(c *gophercloud.ServiceClient) string { + return c.Endpoint +} diff 
--git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/doc.go new file mode 100644 index 000000000000..183e8dfa76d0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/doc.go @@ -0,0 +1,2 @@ +// Package buildinfo provides build information about heat deployments. +package buildinfo diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/requests.go new file mode 100644 index 000000000000..32f6032d6640 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/requests.go @@ -0,0 +1,9 @@ +package buildinfo + +import "github.com/gophercloud/gophercloud" + +// Get retreives data for the given stack template. +func Get(c *gophercloud.ServiceClient) (r GetResult) { + _, r.Err = c.Get(getURL(c), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/results.go new file mode 100644 index 000000000000..c3d2cdbeffbf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/results.go @@ -0,0 +1,29 @@ +package buildinfo + +import ( + "github.com/gophercloud/gophercloud" +) + +// Revision represents the API/Engine revision of a Heat deployment. +type Revision struct { + Revision string `json:"revision"` +} + +// BuildInfo represents the build information for a Heat deployment. +type BuildInfo struct { + API Revision `json:"api"` + Engine Revision `json:"engine"` +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a BuildInfo object and is called after a +// Get operation. +func (r GetResult) Extract() (*BuildInfo, error) { + var s *BuildInfo + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/doc.go new file mode 100644 index 000000000000..3655974438fd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/doc.go @@ -0,0 +1,2 @@ +// orchestration_buildinfo_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/fixtures.go new file mode 100644 index 000000000000..c240d5f5811e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/fixtures.go @@ -0,0 +1,46 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// GetExpected represents the expected object from a Get request. +var GetExpected = &buildinfo.BuildInfo{ + API: buildinfo.Revision{ + Revision: "2.4.5", + }, + Engine: buildinfo.Revision{ + Revision: "1.2.1", + }, +} + +// GetOutput represents the response body from a Get request. 
+const GetOutput = ` +{ + "api": { + "revision": "2.4.5" + }, + "engine": { + "revision": "1.2.1" + } +}` + +// HandleGetSuccessfully creates an HTTP handler at `/build_info` +// on the test handler mux that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/build_info", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/requests_test.go new file mode 100644 index 000000000000..bd2e164af039 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/testing/requests_test.go @@ -0,0 +1,21 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + + actual, err := buildinfo.Get(fake.ServiceClient()).Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/urls.go new file mode 100644 index 000000000000..28a2128df8b8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/buildinfo/urls.go @@ -0,0 +1,7 @@ +package buildinfo + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("build_info") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/doc.go new file mode 100644 index 000000000000..51cdd97473cb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/doc.go @@ -0,0 +1,4 @@ +// Package stackevents provides operations for finding, listing, and retrieving +// stack events. Stack events are events that take place on stacks such as +// updating and abandoning. +package stackevents diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/requests.go new file mode 100644 index 000000000000..e6e7f7914760 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/requests.go @@ -0,0 +1,182 @@ +package stackevents + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Find retrieves stack events for the given stack name. +func Find(c *gophercloud.ServiceClient, stackName string) (r FindResult) { + _, r.Err = c.Get(findURL(c, stackName), &r.Body, nil) + return +} + +// SortDir is a type for specifying in which direction to sort a list of events. 
+type SortDir string + +// SortKey is a type for specifying by which key to sort a list of events. +type SortKey string + +// ResourceStatus is a type for specifying by which resource status to filter a +// list of events. +type ResourceStatus string + +// ResourceAction is a type for specifying by which resource action to filter a +// list of events. +type ResourceAction string + +var ( + // ResourceStatusInProgress is used to filter a List request by the 'IN_PROGRESS' status. + ResourceStatusInProgress ResourceStatus = "IN_PROGRESS" + // ResourceStatusComplete is used to filter a List request by the 'COMPLETE' status. + ResourceStatusComplete ResourceStatus = "COMPLETE" + // ResourceStatusFailed is used to filter a List request by the 'FAILED' status. + ResourceStatusFailed ResourceStatus = "FAILED" + + // ResourceActionCreate is used to filter a List request by the 'CREATE' action. + ResourceActionCreate ResourceAction = "CREATE" + // ResourceActionDelete is used to filter a List request by the 'DELETE' action. + ResourceActionDelete ResourceAction = "DELETE" + // ResourceActionUpdate is used to filter a List request by the 'UPDATE' action. + ResourceActionUpdate ResourceAction = "UPDATE" + // ResourceActionRollback is used to filter a List request by the 'ROLLBACK' action. + ResourceActionRollback ResourceAction = "ROLLBACK" + // ResourceActionSuspend is used to filter a List request by the 'SUSPEND' action. + ResourceActionSuspend ResourceAction = "SUSPEND" + // ResourceActionResume is used to filter a List request by the 'RESUME' action. + ResourceActionResume ResourceAction = "RESUME" + // ResourceActionAbandon is used to filter a List request by the 'ABANDON' action. + ResourceActionAbandon ResourceAction = "ABANDON" + + // SortAsc is used to sort a list of stacks in ascending order. + SortAsc SortDir = "asc" + // SortDesc is used to sort a list of stacks in descending order. + SortDesc SortDir = "desc" + + // SortName is used to sort a list of stacks by name. + SortName SortKey = "name" + // SortResourceType is used to sort a list of stacks by resource type. + SortResourceType SortKey = "resource_type" + // SortCreatedAt is used to sort a list of stacks by date created. + SortCreatedAt SortKey = "created_at" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToStackEventListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Marker and Limit are used for pagination. +type ListOpts struct { + // The stack resource ID with which to start the listing. + Marker string `q:"marker"` + // Integer value for the limit of values to return. + Limit int `q:"limit"` + // Filters the event list by the specified ResourceAction. You can use this + // filter multiple times to filter by multiple resource actions: CREATE, DELETE, + // UPDATE, ROLLBACK, SUSPEND, RESUME or ADOPT. + ResourceActions []ResourceAction `q:"resource_action"` + // Filters the event list by the specified resource_status. You can use this + // filter multiple times to filter by multiple resource statuses: IN_PROGRESS, + // COMPLETE or FAILED. + ResourceStatuses []ResourceStatus `q:"resource_status"` + // Filters the event list by the specified resource_name. You can use this + // filter multiple times to filter by multiple resource names. + ResourceNames []string `q:"resource_name"` + // Filters the event list by the specified resource_type. 
You can use this + // filter multiple times to filter by multiple resource types: OS::Nova::Server, + // OS::Cinder::Volume, and so on. + ResourceTypes []string `q:"resource_type"` + // Sorts the event list by: resource_type or created_at. + SortKey SortKey `q:"sort_keys"` + // The sort direction of the event list. Which is asc (ascending) or desc (descending). + SortDir SortDir `q:"sort_dir"` +} + +// ToStackEventListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToStackEventListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list resources for the given stack. +func List(client *gophercloud.ServiceClient, stackName, stackID string, opts ListOptsBuilder) pagination.Pager { + url := listURL(client, stackName, stackID) + if opts != nil { + query, err := opts.ToStackEventListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + p := EventPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// ListResourceEventsOptsBuilder allows extensions to add additional parameters to the +// ListResourceEvents request. +type ListResourceEventsOptsBuilder interface { + ToResourceEventListQuery() (string, error) +} + +// ListResourceEventsOpts allows the filtering and sorting of paginated resource events through +// the API. Marker and Limit are used for pagination. +type ListResourceEventsOpts struct { + // The stack resource ID with which to start the listing. + Marker string `q:"marker"` + // Integer value for the limit of values to return. + Limit int `q:"limit"` + // Filters the event list by the specified ResourceAction. You can use this + // filter multiple times to filter by multiple resource actions: CREATE, DELETE, + // UPDATE, ROLLBACK, SUSPEND, RESUME or ADOPT. + ResourceActions []string `q:"resource_action"` + // Filters the event list by the specified resource_status. You can use this + // filter multiple times to filter by multiple resource statuses: IN_PROGRESS, + // COMPLETE or FAILED. + ResourceStatuses []string `q:"resource_status"` + // Filters the event list by the specified resource_name. You can use this + // filter multiple times to filter by multiple resource names. + ResourceNames []string `q:"resource_name"` + // Filters the event list by the specified resource_type. You can use this + // filter multiple times to filter by multiple resource types: OS::Nova::Server, + // OS::Cinder::Volume, and so on. + ResourceTypes []string `q:"resource_type"` + // Sorts the event list by: resource_type or created_at. + SortKey SortKey `q:"sort_keys"` + // The sort direction of the event list. Which is asc (ascending) or desc (descending). + SortDir SortDir `q:"sort_dir"` +} + +// ToResourceEventListQuery formats a ListResourceEventsOpts into a query string. +func (opts ListResourceEventsOpts) ToResourceEventListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListResourceEvents makes a request against the API to list resources for the given stack. 
+func ListResourceEvents(client *gophercloud.ServiceClient, stackName, stackID, resourceName string, opts ListResourceEventsOptsBuilder) pagination.Pager {
+ url := listResourceEventsURL(client, stackName, stackID, resourceName)
+ if opts != nil {
+ query, err := opts.ToResourceEventListQuery()
+ if err != nil {
+ return pagination.Pager{Err: err}
+ }
+ url += query
+ }
+ return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page {
+ p := EventPage{pagination.MarkerPageBase{PageResult: r}}
+ p.MarkerPageBase.Owner = p
+ return p
+ })
+}
+
+// Get retrieves data for the given stack event.
+func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) (r GetResult) {
+ _, r.Err = c.Get(getURL(c, stackName, stackID, resourceName, eventID), &r.Body, nil)
+ return
+} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/results.go new file mode 100644 index 000000000000..46fb0ff088c9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/results.go @@ -0,0 +1,119 @@ +package stackevents
+
+import (
+ "encoding/json"
+ "time"
+
+ "github.com/gophercloud/gophercloud"
+ "github.com/gophercloud/gophercloud/pagination"
+)
+
+// Event represents a stack event.
+type Event struct {
+ // The name of the resource for which the event occurred.
+ ResourceName string `json:"resource_name"`
+ // The time the event occurred.
+ Time time.Time `json:"-"`
+ // The URLs to the event.
+ Links []gophercloud.Link `json:"links"`
+ // The logical ID of the stack resource.
+ LogicalResourceID string `json:"logical_resource_id"`
+ // The reason of the status of the event.
+ ResourceStatusReason string `json:"resource_status_reason"`
+ // The status of the event.
+ ResourceStatus string `json:"resource_status"`
+ // The physical ID of the stack resource.
+ PhysicalResourceID string `json:"physical_resource_id"`
+ // The event ID.
+ ID string `json:"id"`
+ // Properties of the stack resource.
+ ResourceProperties map[string]interface{} `json:"resource_properties"`
+}
+
+func (r *Event) UnmarshalJSON(b []byte) error {
+ type tmp Event
+ var s struct {
+ tmp
+ Time gophercloud.JSONRFC3339NoZ `json:"event_time"`
+ }
+ err := json.Unmarshal(b, &s)
+ if err != nil {
+ return err
+ }
+
+ *r = Event(s.tmp)
+
+ r.Time = time.Time(s.Time)
+
+ return nil
+}
+
+// FindResult represents the result of a Find operation.
+type FindResult struct {
+ gophercloud.Result
+}
+
+// Extract returns a slice of Event objects and is called after a
+// Find operation.
+func (r FindResult) Extract() ([]Event, error) {
+ var s struct {
+ Events []Event `json:"events"`
+ }
+ err := r.ExtractInto(&s)
+ return s.Events, err
+}
+
+// EventPage abstracts the raw results of making a List() request against the API.
+// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the
+// data provided through the ExtractEvents call.
+type EventPage struct {
+ pagination.MarkerPageBase
+}
+
+// IsEmpty returns true if a page contains no Event results.
+func (r EventPage) IsEmpty() (bool, error) {
+ events, err := ExtractEvents(r)
+ return len(events) == 0, err
+}
+
+// LastMarker returns the ID of the last event in a ListResult.
+func (r EventPage) LastMarker() (string, error) { + events, err := ExtractEvents(r) + if err != nil { + return "", err + } + if len(events) == 0 { + return "", nil + } + return events[len(events)-1].ID, nil +} + +// ExtractEvents interprets the results of a single page from a List() call, producing a slice of Event entities. +func ExtractEvents(r pagination.Page) ([]Event, error) { + var s struct { + Events []Event `json:"events"` + } + err := (r.(EventPage)).ExtractInto(&s) + return s.Events, err +} + +// ExtractResourceEvents interprets the results of a single page from a +// ListResourceEvents() call, producing a slice of Event entities. +func ExtractResourceEvents(page pagination.Page) ([]Event, error) { + return ExtractEvents(page) +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to an Event object and is called after a +// Get operation. +func (r GetResult) Extract() (*Event, error) { + var s struct { + Event *Event `json:"event"` + } + err := r.ExtractInto(&s) + return s.Event, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/doc.go new file mode 100644 index 000000000000..2e22a6c1671a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/doc.go @@ -0,0 +1,2 @@ +// orchestration_stackevents_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/fixtures.go new file mode 100644 index 000000000000..a40e8d4f60e7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/fixtures.go @@ -0,0 +1,447 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// FindExpected represents the expected object from a Find request. 
+var FindExpected = []stackevents.Event{ + { + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_IN_PROGRESS", + PhysicalResourceID: "", + ID: "06feb26f-9298-4a9b-8749-9d770e5d577a", + }, + { + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", + }, +} + +// FindOutput represents the response body from a Find request. 
+const FindOutput = ` +{ + "events": [ + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:11", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": null, + "id": "06feb26f-9298-4a9b-8749-9d770e5d577a" + }, + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } + ] +}` + +// HandleFindSuccessfully creates an HTTP handler at `/stacks/postman_stack/events` +// on the test handler mux that responds with a `Find` response. +func HandleFindSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/postman_stack/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ListExpected represents the expected object from a List request. 
+var ListExpected = []stackevents.Event{ + { + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_IN_PROGRESS", + PhysicalResourceID: "", + ID: "06feb26f-9298-4a9b-8749-9d770e5d577a", + }, + { + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", + }, +} + +// ListOutput represents the response body from a List request. 
+const ListOutput = ` +{ + "events": [ + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:11", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": null, + "id": "06feb26f-9298-4a9b-8749-9d770e5d577a" + }, + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } + ] +}` + +// HandleListSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/events` +// on the test handler mux that responds with a `List` response. +func HandleListSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "93940999-7d40-44ae-8de4-19624e7b8d18": + fmt.Fprintf(w, `{"events":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// ListResourceEventsExpected represents the expected object from a ListResourceEvents request. 
+var ListResourceEventsExpected = []stackevents.Event{ + { + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_IN_PROGRESS", + PhysicalResourceID: "", + ID: "06feb26f-9298-4a9b-8749-9d770e5d577a", + }, + { + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", + }, +} + +// ListResourceEventsOutput represents the response body from a ListResourceEvents request. 
+const ListResourceEventsOutput = ` +{ + "events": [ + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:11", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/06feb26f-9298-4a9b-8749-9d770e5d577a", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": null, + "id": "06feb26f-9298-4a9b-8749-9d770e5d577a" + }, + { + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } + ] +}` + +// HandleListResourceEventsSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events` +// on the test handler mux that responds with a `ListResourceEvents` response. +func HandleListResourceEventsSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "93940999-7d40-44ae-8de4-19624e7b8d18": + fmt.Fprintf(w, `{"events":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// GetExpected represents the expected object from a Get request. 
+var GetExpected = &stackevents.Event{ + ResourceName: "hello_world", + Time: time.Date(2015, 2, 5, 21, 33, 27, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "resource", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalResourceID: "hello_world", + ResourceStatusReason: "state changed", + ResourceStatus: "CREATE_COMPLETE", + PhysicalResourceID: "49181cd6-169a-4130-9455-31185bbfc5bf", + ID: "93940999-7d40-44ae-8de4-19624e7b8d18", +} + +// GetOutput represents the response body from a Get request. +const GetOutput = ` +{ + "event":{ + "resource_name": "hello_world", + "event_time": "2015-02-05T21:33:27", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world/events/93940999-7d40-44ae-8de4-19624e7b8d18", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "resource" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "resource_status": "CREATE_COMPLETE", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "id": "93940999-7d40-44ae-8de4-19624e7b8d18" + } +}` + +// HandleGetSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events/93940999-7d40-44ae-8de4-19624e7b8d18` +// on the test handler mux that responds with a `Get` response. 
+func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources/my_resource/events/93940999-7d40-44ae-8de4-19624e7b8d18", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/requests_test.go new file mode 100644 index 000000000000..0ad3fc31f621 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/testing/requests_test.go @@ -0,0 +1,72 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestFindEvents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleFindSuccessfully(t, FindOutput) + + actual, err := stackevents.Find(fake.ServiceClient(), "postman_stack").Extract() + th.AssertNoErr(t, err) + + expected := FindExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t, ListOutput) + + count := 0 + err := stackevents.List(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := stackevents.ExtractEvents(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestListResourceEvents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListResourceEventsSuccessfully(t, ListResourceEventsOutput) + + count := 0 + err := stackevents.ListResourceEvents(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", "my_resource", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := stackevents.ExtractEvents(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListResourceEventsExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetEvent(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + + actual, err := stackevents.Get(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", "my_resource", "93940999-7d40-44ae-8de4-19624e7b8d18").Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/urls.go new file mode 100644 index 000000000000..6b6b33089406 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackevents/urls.go @@ -0,0 +1,19 @@ +package stackevents + +import "github.com/gophercloud/gophercloud" + +func findURL(c *gophercloud.ServiceClient, stackName string) string { + return 
c.ServiceURL("stacks", stackName, "events") +} + +func listURL(c *gophercloud.ServiceClient, stackName, stackID string) string { + return c.ServiceURL("stacks", stackName, stackID, "events") +} + +func listResourceEventsURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName, "events") +} + +func getURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName, eventID string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName, "events", eventID) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/doc.go new file mode 100644 index 000000000000..e4f8b08dcc7e --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/doc.go @@ -0,0 +1,5 @@ +// Package stackresources provides operations for working with stack resources. +// A resource is a template artifact that represents some component of your +// desired architecture (a Cloud Server, a group of scaled Cloud Servers, a load +// balancer, some configuration management system, and so forth). +package stackresources diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/requests.go new file mode 100644 index 000000000000..f368b76c6dee --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/requests.go @@ -0,0 +1,77 @@ +package stackresources + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Find retrieves stack resources for the given stack name. +func Find(c *gophercloud.ServiceClient, stackName string) (r FindResult) { + _, r.Err = c.Get(findURL(c, stackName), &r.Body, nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToStackResourceListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Marker and Limit are used for pagination. +type ListOpts struct { + // Include resources from nest stacks up to Depth levels of recursion. + Depth int `q:"nested_depth"` +} + +// ToStackResourceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToStackResourceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List makes a request against the API to list resources for the given stack. +func List(client *gophercloud.ServiceClient, stackName, stackID string, opts ListOptsBuilder) pagination.Pager { + url := listURL(client, stackName, stackID) + if opts != nil { + query, err := opts.ToStackResourceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ResourcePage{pagination.SinglePageBase(r)} + }) +} + +// Get retreives data for the given stack resource. +func Get(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) (r GetResult) { + _, r.Err = c.Get(getURL(c, stackName, stackID, resourceName), &r.Body, nil) + return +} + +// Metadata retreives the metadata for the given stack resource. 
+func Metadata(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) (r MetadataResult) { + _, r.Err = c.Get(metadataURL(c, stackName, stackID, resourceName), &r.Body, nil) + return +} + +// ListTypes makes a request against the API to list resource types. +func ListTypes(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listTypesURL(client), func(r pagination.PageResult) pagination.Page { + return ResourceTypePage{pagination.SinglePageBase(r)} + }) +} + +// Schema retreives the schema for the given resource type. +func Schema(c *gophercloud.ServiceClient, resourceType string) (r SchemaResult) { + _, r.Err = c.Get(schemaURL(c, resourceType), &r.Body, nil) + return +} + +// Template retreives the template representation for the given resource type. +func Template(c *gophercloud.ServiceClient, resourceType string) (r TemplateResult) { + _, r.Err = c.Get(templateURL(c, resourceType), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/results.go new file mode 100644 index 000000000000..59c02a38c14c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/results.go @@ -0,0 +1,185 @@ +package stackresources + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// Resource represents a stack resource. +type Resource struct { + Attributes map[string]interface{} `json:"attributes"` + CreationTime time.Time `json:"-"` + Description string `json:"description"` + Links []gophercloud.Link `json:"links"` + LogicalID string `json:"logical_resource_id"` + Name string `json:"resource_name"` + PhysicalID string `json:"physical_resource_id"` + RequiredBy []interface{} `json:"required_by"` + Status string `json:"resource_status"` + StatusReason string `json:"resource_status_reason"` + Type string `json:"resource_type"` + UpdatedTime time.Time `json:"-"` +} + +func (r *Resource) UnmarshalJSON(b []byte) error { + type tmp Resource + var s struct { + tmp + CreationTime gophercloud.JSONRFC3339NoZ `json:"creation_time"` + UpdatedTime gophercloud.JSONRFC3339NoZ `json:"updated_time"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Resource(s.tmp) + + r.CreationTime = time.Time(s.CreationTime) + r.UpdatedTime = time.Time(s.UpdatedTime) + + return nil +} + +// FindResult represents the result of a Find operation. +type FindResult struct { + gophercloud.Result +} + +// Extract returns a slice of Resource objects and is called after a +// Find operation. +func (r FindResult) Extract() ([]Resource, error) { + var s struct { + Resources []Resource `json:"resources"` + } + err := r.ExtractInto(&s) + return s.Resources, err +} + +// ResourcePage abstracts the raw results of making a List() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the +// data provided through the ExtractResources call. +type ResourcePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a page contains no Server results. 
+func (r ResourcePage) IsEmpty() (bool, error) { + resources, err := ExtractResources(r) + return len(resources) == 0, err +} + +// ExtractResources interprets the results of a single page from a List() call, producing a slice of Resource entities. +func ExtractResources(r pagination.Page) ([]Resource, error) { + var s struct { + Resources []Resource `json:"resources"` + } + err := (r.(ResourcePage)).ExtractInto(&s) + return s.Resources, err +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a Resource object and is called after a +// Get operation. +func (r GetResult) Extract() (*Resource, error) { + var s struct { + Resource *Resource `json:"resource"` + } + err := r.ExtractInto(&s) + return s.Resource, err +} + +// MetadataResult represents the result of a Metadata operation. +type MetadataResult struct { + gophercloud.Result +} + +// Extract returns a map object and is called after a +// Metadata operation. +func (r MetadataResult) Extract() (map[string]string, error) { + var s struct { + Meta map[string]string `json:"metadata"` + } + err := r.ExtractInto(&s) + return s.Meta, err +} + +// ResourceTypePage abstracts the raw results of making a ListTypes() request against the API. +// As OpenStack extensions may freely alter the response bodies of structures returned to the client, you may only safely access the +// data provided through the ExtractResourceTypes call. +type ResourceTypePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ResourceTypePage contains no resource types. +func (r ResourceTypePage) IsEmpty() (bool, error) { + rts, err := ExtractResourceTypes(r) + return len(rts) == 0, err +} + +// ResourceTypes represents the type that holds the result of ExtractResourceTypes. +// We define methods on this type to sort it before output +type ResourceTypes []string + +func (r ResourceTypes) Len() int { + return len(r) +} + +func (r ResourceTypes) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +func (r ResourceTypes) Less(i, j int) bool { + return r[i] < r[j] +} + +// ExtractResourceTypes extracts and returns resource types. +func ExtractResourceTypes(r pagination.Page) (ResourceTypes, error) { + var s struct { + ResourceTypes ResourceTypes `json:"resource_types"` + } + err := (r.(ResourceTypePage)).ExtractInto(&s) + return s.ResourceTypes, err +} + +// TypeSchema represents a stack resource schema. +type TypeSchema struct { + Attributes map[string]interface{} `json:"attributes"` + Properties map[string]interface{} `json:"properties"` + ResourceType string `json:"resource_type"` + SupportStatus map[string]interface{} `json:"support_status"` +} + +// SchemaResult represents the result of a Schema operation. +type SchemaResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a TypeSchema object and is called after a +// Schema operation. +func (r SchemaResult) Extract() (*TypeSchema, error) { + var s *TypeSchema + err := r.ExtractInto(&s) + return s, err +} + +// TemplateResult represents the result of a Template operation. +type TemplateResult struct { + gophercloud.Result +} + +// Extract returns the template and is called after a +// Template operation. 
+func (r TemplateResult) Extract() ([]byte, error) { + if r.Err != nil { + return nil, r.Err + } + template, err := json.MarshalIndent(r.Body, "", " ") + return template, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/doc.go new file mode 100644 index 000000000000..16e1dae291bf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/doc.go @@ -0,0 +1,2 @@ +// orchestration_stackresources_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/fixtures.go new file mode 100644 index 000000000000..e8903374ec0c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/fixtures.go @@ -0,0 +1,440 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// FindExpected represents the expected object from a Find request. +var FindExpected = []stackresources.Resource{ + { + Name: "hello_world", + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalID: "hello_world", + StatusReason: "state changed", + UpdatedTime: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + CreationTime: time.Date(2015, 2, 5, 21, 33, 10, 0, time.UTC), + RequiredBy: []interface{}{}, + Status: "CREATE_IN_PROGRESS", + PhysicalID: "49181cd6-169a-4130-9455-31185bbfc5bf", + Type: "OS::Nova::Server", + Attributes: map[string]interface{}{"SXSW": "atx"}, + Description: "Some resource", + }, +} + +// FindOutput represents the response body from a Find request. +const FindOutput = ` +{ + "resources": [ + { + "description": "Some resource", + "attributes": {"SXSW": "atx"}, + "resource_name": "hello_world", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "updated_time": "2015-02-05T21:33:11", + "creation_time": "2015-02-05T21:33:10", + "required_by": [], + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "resource_type": "OS::Nova::Server" + } + ] +}` + +// HandleFindSuccessfully creates an HTTP handler at `/stacks/hello_world/resources` +// on the test handler mux that responds with a `Find` response. 
+func HandleFindSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/resources", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ListExpected represents the expected object from a List request. +var ListExpected = []stackresources.Resource{ + { + Name: "hello_world", + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + Rel: "stack", + }, + }, + LogicalID: "hello_world", + StatusReason: "state changed", + UpdatedTime: time.Date(2015, 2, 5, 21, 33, 11, 0, time.UTC), + CreationTime: time.Date(2015, 2, 5, 21, 33, 10, 0, time.UTC), + RequiredBy: []interface{}{}, + Status: "CREATE_IN_PROGRESS", + PhysicalID: "49181cd6-169a-4130-9455-31185bbfc5bf", + Type: "OS::Nova::Server", + Attributes: map[string]interface{}{"SXSW": "atx"}, + Description: "Some resource", + }, +} + +// ListOutput represents the response body from a List request. +const ListOutput = `{ + "resources": [ + { + "resource_name": "hello_world", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b/resources/hello_world", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/5f57cff9-93fc-424e-9f78-df0515e7f48b", + "rel": "stack" + } + ], + "logical_resource_id": "hello_world", + "resource_status_reason": "state changed", + "updated_time": "2015-02-05T21:33:11", + "required_by": [], + "resource_status": "CREATE_IN_PROGRESS", + "physical_resource_id": "49181cd6-169a-4130-9455-31185bbfc5bf", + "creation_time": "2015-02-05T21:33:10", + "resource_type": "OS::Nova::Server", + "attributes": {"SXSW": "atx"}, + "description": "Some resource" + } +] +}` + +// HandleListSuccessfully creates an HTTP handler at `/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources` +// on the test handler mux that responds with a `List` response. +func HandleListSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/hello_world/49181cd6-169a-4130-9455-31185bbfc5bf/resources", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "49181cd6-169a-4130-9455-31185bbfc5bf": + fmt.Fprintf(w, `{"resources":[]}`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// GetExpected represents the expected object from a Get request. 
+var GetExpected = &stackresources.Resource{ + Name: "wordpress_instance", + Links: []gophercloud.Link{ + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance", + Rel: "self", + }, + { + Href: "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e", + Rel: "stack", + }, + }, + LogicalID: "wordpress_instance", + Attributes: map[string]interface{}{"SXSW": "atx"}, + StatusReason: "state changed", + UpdatedTime: time.Date(2014, 12, 10, 18, 34, 35, 0, time.UTC), + RequiredBy: []interface{}{}, + Status: "CREATE_COMPLETE", + PhysicalID: "00e3a2fe-c65d-403c-9483-4db9930dd194", + Type: "OS::Nova::Server", +} + +// GetOutput represents the response body from a Get request. +const GetOutput = ` +{ + "resource": { + "description": "Some resource", + "attributes": {"SXSW": "atx"}, + "resource_name": "wordpress_instance", + "description": "", + "links": [ + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance", + "rel": "self" + }, + { + "href": "http://166.78.160.107:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e", + "rel": "stack" + } + ], + "logical_resource_id": "wordpress_instance", + "resource_status": "CREATE_COMPLETE", + "updated_time": "2014-12-10T18:34:35", + "required_by": [], + "resource_status_reason": "state changed", + "physical_resource_id": "00e3a2fe-c65d-403c-9483-4db9930dd194", + "resource_type": "OS::Nova::Server" + } +}` + +// HandleGetSuccessfully creates an HTTP handler at `/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance` +// on the test handler mux that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// MetadataExpected represents the expected object from a Metadata request. +var MetadataExpected = map[string]string{ + "number": "7", + "animal": "auk", +} + +// MetadataOutput represents the response body from a Metadata request. +const MetadataOutput = ` +{ + "metadata": { + "number": "7", + "animal": "auk" + } +}` + +// HandleMetadataSuccessfully creates an HTTP handler at `/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance/metadata` +// on the test handler mux that responds with a `Metadata` response. +func HandleMetadataSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/teststack/0b1771bd-9336-4f2b-ae86-a80f971faf1e/resources/wordpress_instance/metadata", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ListTypesExpected represents the expected object from a ListTypes request. 
+var ListTypesExpected = stackresources.ResourceTypes{ + "OS::Nova::Server", + "OS::Heat::RandomString", + "OS::Swift::Container", + "OS::Trove::Instance", + "OS::Nova::FloatingIPAssociation", + "OS::Cinder::VolumeAttachment", + "OS::Nova::FloatingIP", + "OS::Nova::KeyPair", +} + +// same as above, but sorted +var SortedListTypesExpected = stackresources.ResourceTypes{ + "OS::Cinder::VolumeAttachment", + "OS::Heat::RandomString", + "OS::Nova::FloatingIP", + "OS::Nova::FloatingIPAssociation", + "OS::Nova::KeyPair", + "OS::Nova::Server", + "OS::Swift::Container", + "OS::Trove::Instance", +} + +// ListTypesOutput represents the response body from a ListTypes request. +const ListTypesOutput = ` +{ + "resource_types": [ + "OS::Nova::Server", + "OS::Heat::RandomString", + "OS::Swift::Container", + "OS::Trove::Instance", + "OS::Nova::FloatingIPAssociation", + "OS::Cinder::VolumeAttachment", + "OS::Nova::FloatingIP", + "OS::Nova::KeyPair" + ] +}` + +// HandleListTypesSuccessfully creates an HTTP handler at `/resource_types` +// on the test handler mux that responds with a `ListTypes` response. +func HandleListTypesSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/resource_types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// GetSchemaExpected represents the expected object from a Schema request. +var GetSchemaExpected = &stackresources.TypeSchema{ + Attributes: map[string]interface{}{ + "an_attribute": map[string]interface{}{ + "description": "An attribute description .", + }, + }, + Properties: map[string]interface{}{ + "a_property": map[string]interface{}{ + "update_allowed": false, + "required": true, + "type": "string", + "description": "A resource description.", + }, + }, + ResourceType: "OS::Heat::AResourceName", + SupportStatus: map[string]interface{}{ + "message": "A status message", + "status": "SUPPORTED", + "version": "2014.1", + }, +} + +// GetSchemaOutput represents the response body from a Schema request. +const GetSchemaOutput = ` +{ + "attributes": { + "an_attribute": { + "description": "An attribute description ." + } + }, + "properties": { + "a_property": { + "update_allowed": false, + "required": true, + "type": "string", + "description": "A resource description." + } + }, + "resource_type": "OS::Heat::AResourceName", + "support_status": { + "message": "A status message", + "status": "SUPPORTED", + "version": "2014.1" + } +}` + +// HandleGetSchemaSuccessfully creates an HTTP handler at `/resource_types/OS::Heat::AResourceName` +// on the test handler mux that responds with a `Schema` response. +func HandleGetSchemaSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/resource_types/OS::Heat::AResourceName", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// GetTemplateExpected represents the expected object from a Template request. 
+var GetTemplateExpected = "{\n \"HeatTemplateFormatVersion\": \"2012-12-12\",\n \"Outputs\": {\n \"private_key\": {\n \"Description\": \"The private key if it has been saved.\",\n \"Value\": \"{\\\"Fn::GetAtt\\\": [\\\"KeyPair\\\", \\\"private_key\\\"]}\"\n },\n \"public_key\": {\n \"Description\": \"The public key.\",\n \"Value\": \"{\\\"Fn::GetAtt\\\": [\\\"KeyPair\\\", \\\"public_key\\\"]}\"\n }\n },\n \"Parameters\": {\n \"name\": {\n \"Description\": \"The name of the key pair.\",\n \"Type\": \"String\"\n },\n \"public_key\": {\n \"Description\": \"The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.\",\n \"Type\": \"String\"\n },\n \"save_private_key\": {\n \"AllowedValues\": [\n \"True\",\n \"true\",\n \"False\",\n \"false\"\n ],\n \"Default\": false,\n \"Description\": \"True if the system should remember a generated private key; False otherwise.\",\n \"Type\": \"String\"\n }\n },\n \"Resources\": {\n \"KeyPair\": {\n \"Properties\": {\n \"name\": {\n \"Ref\": \"name\"\n },\n \"public_key\": {\n \"Ref\": \"public_key\"\n },\n \"save_private_key\": {\n \"Ref\": \"save_private_key\"\n }\n },\n \"Type\": \"OS::Nova::KeyPair\"\n }\n }\n}" + +// GetTemplateOutput represents the response body from a Template request. +const GetTemplateOutput = ` +{ + "HeatTemplateFormatVersion": "2012-12-12", + "Outputs": { + "private_key": { + "Description": "The private key if it has been saved.", + "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"private_key\"]}" + }, + "public_key": { + "Description": "The public key.", + "Value": "{\"Fn::GetAtt\": [\"KeyPair\", \"public_key\"]}" + } + }, + "Parameters": { + "name": { + "Description": "The name of the key pair.", + "Type": "String" + }, + "public_key": { + "Description": "The optional public key. This allows users to supply the public key from a pre-existing key pair. If not supplied, a new key pair will be generated.", + "Type": "String" + }, + "save_private_key": { + "AllowedValues": [ + "True", + "true", + "False", + "false" + ], + "Default": false, + "Description": "True if the system should remember a generated private key; False otherwise.", + "Type": "String" + } + }, + "Resources": { + "KeyPair": { + "Properties": { + "name": { + "Ref": "name" + }, + "public_key": { + "Ref": "public_key" + }, + "save_private_key": { + "Ref": "save_private_key" + } + }, + "Type": "OS::Nova::KeyPair" + } + } +}` + +// HandleGetTemplateSuccessfully creates an HTTP handler at `/resource_types/OS::Heat::AResourceName/template` +// on the test handler mux that responds with a `Template` response. 
+func HandleGetTemplateSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/resource_types/OS::Heat::AResourceName/template", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/requests_test.go new file mode 100644 index 000000000000..c714047fa317 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/testing/requests_test.go @@ -0,0 +1,112 @@ +package testing + +import ( + "sort" + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestFindResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleFindSuccessfully(t, FindOutput) + + actual, err := stackresources.Find(fake.ServiceClient(), "hello_world").Extract() + th.AssertNoErr(t, err) + + expected := FindExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListResources(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t, ListOutput) + + count := 0 + err := stackresources.List(fake.ServiceClient(), "hello_world", "49181cd6-169a-4130-9455-31185bbfc5bf", nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := stackresources.ExtractResources(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetResource(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + + actual, err := stackresources.Get(fake.ServiceClient(), "teststack", "0b1771bd-9336-4f2b-ae86-a80f971faf1e", "wordpress_instance").Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestResourceMetadata(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleMetadataSuccessfully(t, MetadataOutput) + + actual, err := stackresources.Metadata(fake.ServiceClient(), "teststack", "0b1771bd-9336-4f2b-ae86-a80f971faf1e", "wordpress_instance").Extract() + th.AssertNoErr(t, err) + + expected := MetadataExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListResourceTypes(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListTypesSuccessfully(t, ListTypesOutput) + + count := 0 + err := stackresources.ListTypes(fake.ServiceClient()).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := stackresources.ExtractResourceTypes(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListTypesExpected, actual) + // test if sorting works + sort.Sort(actual) + th.CheckDeepEquals(t, SortedListTypesExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, 1, count) +} + +func TestGetResourceSchema(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSchemaSuccessfully(t, GetSchemaOutput) + + actual, err := 
stackresources.Schema(fake.ServiceClient(), "OS::Heat::AResourceName").Extract() + th.AssertNoErr(t, err) + + expected := GetSchemaExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestGetResourceTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetTemplateSuccessfully(t, GetTemplateOutput) + + actual, err := stackresources.Template(fake.ServiceClient(), "OS::Heat::AResourceName").Extract() + th.AssertNoErr(t, err) + + expected := GetTemplateExpected + th.AssertDeepEquals(t, expected, string(actual)) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/urls.go new file mode 100644 index 000000000000..bbddc69cbcda --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stackresources/urls.go @@ -0,0 +1,31 @@ +package stackresources + +import "github.com/gophercloud/gophercloud" + +func findURL(c *gophercloud.ServiceClient, stackName string) string { + return c.ServiceURL("stacks", stackName, "resources") +} + +func listURL(c *gophercloud.ServiceClient, stackName, stackID string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources") +} + +func getURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName) +} + +func metadataURL(c *gophercloud.ServiceClient, stackName, stackID, resourceName string) string { + return c.ServiceURL("stacks", stackName, stackID, "resources", resourceName, "metadata") +} + +func listTypesURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("resource_types") +} + +func schemaURL(c *gophercloud.ServiceClient, typeName string) string { + return c.ServiceURL("resource_types", typeName) +} + +func templateURL(c *gophercloud.ServiceClient, typeName string) string { + return c.ServiceURL("resource_types", typeName, "template") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/doc.go new file mode 100644 index 000000000000..f8f2a6fefbb4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/doc.go @@ -0,0 +1,115 @@ +/* +Package stacks provides operation for working with Heat stacks. A stack is a +group of resources (servers, load balancers, databases, and so forth) +combined to fulfill a useful purpose. Based on a template, Heat orchestration +engine creates an instantiated set of resources (a stack) to run the +application framework or component specified (in the template). A stack is a +running instance of a template. The result of creating a stack is a deployment +of the application framework or component. 
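
A minimal stack-creation sketch may help orient the Update examples further below, which assume a stack already exists. It uses the CreateOpts defined in this package's requests.go; orchestrationClient, template.yaml, and the parameter value are assumed placeholders mirroring the Update examples, and the snippet is an illustrative sketch rather than part of the vendored documentation:

    // Read and wrap the template, as in the Update examples below.
    f, err := ioutil.ReadFile("template.yaml")
    if err != nil {
        panic(err)
    }
    template := stacks.Template{}
    template.TE = stacks.TE{Bin: f}

    // Build the creation options; Parameters is map[string]string in this version.
    createOpts := stacks.CreateOpts{
        Name:         "my_stack",
        TemplateOpts: &template,
        Parameters:   map[string]string{"number_of_nodes": "2"},
        Timeout:      60,
    }

    // Create the stack and check the result for an error.
    res := stacks.Create(orchestrationClient, createOpts)
    if res.Err != nil {
        panic(res.Err)
    }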
+ +Summary of Behavior Between Stack Update and UpdatePatch Methods : + +Function | Test Case | Result + +Update() | Template AND Parameters WITH Conflict | Parameter takes priority, parameters are set in raw_template.environment overlay +Update() | Template ONLY | Template updates, raw_template.environment overlay is removed +Update() | Parameters ONLY | No update, template is required + +UpdatePatch() | Template AND Parameters WITH Conflict | Parameter takes priority, parameters are set in raw_template.environment overlay +UpdatePatch() | Template ONLY | Template updates, but raw_template.environment overlay is not removed, existing parameter values will remain +UpdatePatch() | Parameters ONLY | Parameters (raw_template.environment) is updated, excluded values are unchanged + +The PUT Update() function will remove parameters from the raw_template.environment overlay +if they are excluded from the operation, whereas PATCH Update() will never be destructive to the +raw_template.environment overlay. It is not possible to expose the raw_template values with a +patch update once they have been added to the environment overlay with the PATCH verb, but +newly added values that do not have a corresponding key in the overlay will display the +raw_template value. + +Example to Update a Stack Using the Update (PUT) Method + + t := make(map[string]interface{}) + f, err := ioutil.ReadFile("template.yaml") + if err != nil { + panic(err) + } + err = yaml.Unmarshal(f, t) + if err != nil { + panic(err) + } + + template := stacks.Template{} + template.TE = stacks.TE{ + Bin: f, + } + + var params = make(map[string]interface{}) + params["number_of_nodes"] = 2 + + stackName := "my_stack" + stackId := "d68cc349-ccc5-4b44-a17d-07f068c01e5a" + + stackOpts := &stacks.UpdateOpts{ + Parameters: params, + TemplateOpts: &template, + } + + res := stacks.Update(orchestrationClient, stackName, stackId, stackOpts) + if res.Err != nil { + panic(res.Err) + } + +Example to Update a Stack Using the UpdatePatch (PATCH) Method + + var params = make(map[string]interface{}) + params["number_of_nodes"] = 2 + + stackName := "my_stack" + stackId := "d68cc349-ccc5-4b44-a17d-07f068c01e5a" + + stackOpts := &stacks.UpdateOpts{ + Parameters: params, + } + + res := stacks.UpdatePatch(orchestrationClient, stackName, stackId, stackOpts) + if res.Err != nil { + panic(res.Err) + } + +Example YAML Template Containing a Heat::ResourceGroup With Three Nodes + + heat_template_version: 2016-04-08 + + parameters: + number_of_nodes: + type: number + default: 3 + description: the number of nodes + node_flavor: + type: string + default: m1.small + description: node flavor + node_image: + type: string + default: centos7.5-latest + description: node os image + node_network: + type: string + default: my-node-network + description: node network name + + resources: + resource_group: + type: OS::Heat::ResourceGroup + properties: + count: { get_param: number_of_nodes } + resource_def: + type: OS::Nova::Server + properties: + name: my_nova_server_%index% + image: { get_param: node_image } + flavor: { get_param: node_flavor } + networks: + - network: {get_param: node_network} +*/ +package stacks diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/environment.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/environment.go new file mode 100644 index 000000000000..86989186fa1a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/environment.go @@ -0,0 
+1,134 @@ +package stacks + +import "strings" + +// Environment is a structure that represents stack environments +type Environment struct { + TE +} + +// EnvironmentSections is a map containing allowed sections in a stack environment file +var EnvironmentSections = map[string]bool{ + "parameters": true, + "parameter_defaults": true, + "resource_registry": true, +} + +// Validate validates the contents of the Environment +func (e *Environment) Validate() error { + if e.Parsed == nil { + if err := e.Parse(); err != nil { + return err + } + } + for key := range e.Parsed { + if _, ok := EnvironmentSections[key]; !ok { + return ErrInvalidEnvironment{Section: key} + } + } + return nil +} + +// Parse environment file to resolve the URL's of the resources. This is done by +// reading from the `Resource Registry` section, which is why the function is +// named GetRRFileContents. +func (e *Environment) getRRFileContents(ignoreIf igFunc) error { + // initialize environment if empty + if e.Files == nil { + e.Files = make(map[string]string) + } + if e.fileMaps == nil { + e.fileMaps = make(map[string]string) + } + + // get the resource registry + rr := e.Parsed["resource_registry"] + + // search the resource registry for URLs + switch rr.(type) { + // process further only if the resource registry is a map + case map[string]interface{}, map[interface{}]interface{}: + rrMap, err := toStringKeys(rr) + if err != nil { + return err + } + // the resource registry might contain a base URL for the resource. If + // such a field is present, use it. Otherwise, use the default base URL. + var baseURL string + if val, ok := rrMap["base_url"]; ok { + baseURL = val.(string) + } else { + baseURL = e.baseURL + } + + // The contents of the resource may be located in a remote file, which + // will be a template. Instantiate a temporary template to manage the + // contents. + tempTemplate := new(Template) + tempTemplate.baseURL = baseURL + tempTemplate.client = e.client + + // Fetch the contents of remote resource URL's + if err = tempTemplate.getFileContents(rr, ignoreIf, false); err != nil { + return err + } + // check the `resources` section (if it exists) for more URL's. Note that + // the previous call to GetFileContents was (deliberately) not recursive + // as we want more control over where to look for URL's + if val, ok := rrMap["resources"]; ok { + switch val.(type) { + // process further only if the contents are a map + case map[string]interface{}, map[interface{}]interface{}: + resourcesMap, err := toStringKeys(val) + if err != nil { + return err + } + for _, v := range resourcesMap { + switch v.(type) { + case map[string]interface{}, map[interface{}]interface{}: + resourceMap, err := toStringKeys(v) + if err != nil { + return err + } + var resourceBaseURL string + // if base_url for the resource type is defined, use it + if val, ok := resourceMap["base_url"]; ok { + resourceBaseURL = val.(string) + } else { + resourceBaseURL = baseURL + } + tempTemplate.baseURL = resourceBaseURL + if err := tempTemplate.getFileContents(v, ignoreIf, false); err != nil { + return err + } + } + } + } + } + // if the resource registry contained any URL's, store them. This can + // then be passed as parameter to api calls to Heat api. 
+ e.Files = tempTemplate.Files + return nil + default: + return nil + } +} + +// function to choose keys whose values are other environment files +func ignoreIfEnvironment(key string, value interface{}) bool { + // base_url and hooks refer to components which cannot have urls + if key == "base_url" || key == "hooks" { + return true + } + // if value is not string, it cannot be a URL + valueString, ok := value.(string) + if !ok { + return true + } + // if value contains `::`, it must be a reference to another resource type + // e.g. OS::Nova::Server : Rackspace::Cloud::Server + if strings.Contains(valueString, "::") { + return true + } + return false +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/environment_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/environment_test.go new file mode 100644 index 000000000000..6fcc230d4391 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/environment_test.go @@ -0,0 +1,192 @@ +package stacks + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestEnvironmentValidation(t *testing.T) { + + environmentJSON := new(Environment) + environmentJSON.Bin = []byte(ValidJSONEnvironment) + err := environmentJSON.Validate() + th.AssertNoErr(t, err) + + environmentYAML := new(Environment) + environmentYAML.Bin = []byte(ValidYAMLEnvironment) + err = environmentYAML.Validate() + th.AssertNoErr(t, err) + + environmentInvalid := new(Environment) + environmentInvalid.Bin = []byte(InvalidEnvironment) + if err = environmentInvalid.Validate(); err == nil { + t.Error("environment validation did not catch invalid environment") + } +} + +func TestEnvironmentParsing(t *testing.T) { + environmentJSON := new(Environment) + environmentJSON.Bin = []byte(ValidJSONEnvironment) + err := environmentJSON.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONEnvironmentParsed, environmentJSON.Parsed) + + environmentYAML := new(Environment) + environmentYAML.Bin = []byte(ValidJSONEnvironment) + err = environmentYAML.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONEnvironmentParsed, environmentYAML.Parsed) + + environmentInvalid := new(Environment) + environmentInvalid.Bin = []byte("Keep Austin Weird") + err = environmentInvalid.Parse() + if err == nil { + t.Error("environment parsing did not catch invalid environment") + } +} + +func TestIgnoreIfEnvironment(t *testing.T) { + var keyValueTests = []struct { + key string + value interface{} + out bool + }{ + {"base_url", "afksdf", true}, + {"not_type", "hooks", false}, + {"get_file", "::", true}, + {"hooks", "dfsdfsd", true}, + {"type", "sdfubsduf.yaml", false}, + {"type", "sdfsdufs.environment", false}, + {"type", "sdfsdf.file", false}, + {"type", map[string]string{"key": "value"}, true}, + } + var result bool + for _, kv := range keyValueTests { + result = ignoreIfEnvironment(kv.key, kv.value) + if result != kv.out { + t.Errorf("key: %v, value: %v expected: %v, actual: %v", kv.key, kv.value, kv.out, result) + } + } +} + +func TestGetRRFileContents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + environmentContent := ` +heat_template_version: 2013-05-23 + +description: + Heat WordPress template to support F18, using only Heat OpenStack-native + resource types, and without the requirement for heat-cfntools in the image. 
+ WordPress is web software you can use to create a beautiful website or blog. + This template installs a single-instance WordPress deployment using a local + MySQL database to store the data. + +parameters: + + key_name: + type: string + description : Name of a KeyPair to enable SSH access to the instance + +resources: + wordpress_instance: + type: OS::Nova::Server + properties: + image: { get_param: image_id } + flavor: { get_param: instance_type } + key_name: { get_param: key_name }` + + dbContent := ` +heat_template_version: 2014-10-16 + +description: + Test template for Trove resource capabilities + +parameters: + db_pass: + type: string + hidden: true + description: Database access password + default: secrete + +resources: + +service_db: + type: OS::Trove::Instance + properties: + name: trove_test_db + datastore_type: mariadb + flavor: 1GB Instance + size: 10 + databases: + - name: test_data + users: + - name: kitchen_sink + password: { get_param: db_pass } + databases: [ test_data ]` + baseurl, err := getBasePath() + th.AssertNoErr(t, err) + + fakeEnvURL := strings.Join([]string{baseurl, "my_env.yaml"}, "/") + urlparsed, err := url.Parse(fakeEnvURL) + th.AssertNoErr(t, err) + // handler for my_env.yaml + th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, environmentContent) + }) + + fakeDBURL := strings.Join([]string{baseurl, "my_db.yaml"}, "/") + urlparsed, err = url.Parse(fakeDBURL) + th.AssertNoErr(t, err) + + // handler for my_db.yaml + th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, dbContent) + }) + + client := fakeClient{BaseClient: getHTTPClient()} + env := new(Environment) + env.Bin = []byte(`{"resource_registry": {"My::WP::Server": "my_env.yaml", "resources": {"my_db_server": {"OS::DBInstance": "my_db.yaml"}}}}`) + env.client = client + + err = env.Parse() + th.AssertNoErr(t, err) + err = env.getRRFileContents(ignoreIfEnvironment) + th.AssertNoErr(t, err) + expectedEnvFilesContent := "\nheat_template_version: 2013-05-23\n\ndescription:\n Heat WordPress template to support F18, using only Heat OpenStack-native\n resource types, and without the requirement for heat-cfntools in the image.\n WordPress is web software you can use to create a beautiful website or blog.\n This template installs a single-instance WordPress deployment using a local\n MySQL database to store the data.\n\nparameters:\n\n key_name:\n type: string\n description : Name of a KeyPair to enable SSH access to the instance\n\nresources:\n wordpress_instance:\n type: OS::Nova::Server\n properties:\n image: { get_param: image_id }\n flavor: { get_param: instance_type }\n key_name: { get_param: key_name }" + expectedDBFilesContent := "\nheat_template_version: 2014-10-16\n\ndescription:\n Test template for Trove resource capabilities\n\nparameters:\n db_pass:\n type: string\n hidden: true\n description: Database access password\n default: secrete\n\nresources:\n\nservice_db:\n type: OS::Trove::Instance\n properties:\n name: trove_test_db\n datastore_type: mariadb\n flavor: 1GB Instance\n size: 10\n databases:\n - name: test_data\n users:\n - name: kitchen_sink\n password: { get_param: db_pass }\n databases: [ test_data ]" + + th.AssertEquals(t, expectedEnvFilesContent, env.Files[fakeEnvURL]) + 
th.AssertEquals(t, expectedDBFilesContent, env.Files[fakeDBURL]) + + // Update env's fileMaps to replace relative filenames by absolute URLs. + env.fileMaps = map[string]string{ + "my_env.yaml": fakeEnvURL, + "my_db.yaml": fakeDBURL, + } + env.fixFileRefs() + + expectedParsed := map[string]interface{}{ + "resource_registry": map[string]interface{}{ + "My::WP::Server": fakeEnvURL, + "resources": map[string]interface{}{ + "my_db_server": map[string]interface{}{ + "OS::DBInstance": fakeDBURL, + }, + }, + }, + } + env.Parse() + th.AssertDeepEquals(t, expectedParsed, env.Parsed) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/errors.go new file mode 100644 index 000000000000..a6febe040841 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/errors.go @@ -0,0 +1,41 @@ +package stacks + +import ( + "fmt" + + "github.com/gophercloud/gophercloud" +) + +type ErrInvalidEnvironment struct { + gophercloud.BaseError + Section string +} + +func (e ErrInvalidEnvironment) Error() string { + return fmt.Sprintf("Environment has wrong section: %s", e.Section) +} + +type ErrInvalidDataFormat struct { + gophercloud.BaseError +} + +func (e ErrInvalidDataFormat) Error() string { + return fmt.Sprintf("Data in neither json nor yaml format.") +} + +type ErrInvalidTemplateFormatVersion struct { + gophercloud.BaseError + Version string +} + +func (e ErrInvalidTemplateFormatVersion) Error() string { + return fmt.Sprintf("Template format version not found.") +} + +type ErrTemplateRequired struct { + gophercloud.BaseError +} + +func (e ErrTemplateRequired) Error() string { + return fmt.Sprintf("Template required for this function.") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/fixtures.go new file mode 100644 index 000000000000..58987d4bfd51 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/fixtures.go @@ -0,0 +1,199 @@ +package stacks + +// ValidJSONTemplate is a valid OpenStack Heat template in JSON format +const ValidJSONTemplate = ` +{ + "heat_template_version": "2014-10-16", + "parameters": { + "flavor": { + "default": "debian2G", + "description": "Flavor for the server to be created", + "hidden": true, + "type": "string" + } + }, + "resources": { + "test_server": { + "properties": { + "flavor": "2 GB General Purpose v1", + "image": "Debian 7 (Wheezy) (PVHVM)", + "name": "test-server" + }, + "type": "OS::Nova::Server" + } + } +} +` + +// ValidYAMLTemplate is a valid OpenStack Heat template in YAML format +const ValidYAMLTemplate = ` +heat_template_version: 2014-10-16 +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: debian2G + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) +` + +// InvalidTemplateNoVersion is an invalid template as it has no `version` section +const InvalidTemplateNoVersion = ` +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: debian2G + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) +` + +// 
ValidJSONEnvironment is a valid environment for a stack in JSON format +const ValidJSONEnvironment = ` +{ + "parameters": { + "user_key": "userkey" + }, + "resource_registry": { + "My::WP::Server": "file:///home/shardy/git/heat-templates/hot/F18/WordPress_Native.yaml", + "OS::Quantum*": "OS::Neutron*", + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml", + "OS::Metering::Alarm": "OS::Ceilometer::Alarm", + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml", + "resources": { + "my_db_server": { + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml" + }, + "my_server": { + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml", + "hooks": "pre-create" + }, + "nested_stack": { + "nested_resource": { + "hooks": "pre-update" + }, + "another_resource": { + "hooks": [ + "pre-create", + "pre-update" + ] + } + } + } + } +} +` + +// ValidYAMLEnvironment is a valid environment for a stack in YAML format +const ValidYAMLEnvironment = ` +parameters: + user_key: userkey +resource_registry: + My::WP::Server: file:///home/shardy/git/heat-templates/hot/F18/WordPress_Native.yaml + # allow older templates with Quantum in them. + "OS::Quantum*": "OS::Neutron*" + # Choose your implementation of AWS::CloudWatch::Alarm + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml" + #"AWS::CloudWatch::Alarm": "OS::Heat::CWLiteAlarm" + "OS::Metering::Alarm": "OS::Ceilometer::Alarm" + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml" + resources: + my_db_server: + "OS::DBInstance": file:///home/mine/all_my_cool_templates/db.yaml + my_server: + "OS::DBInstance": file:///home/mine/all_my_cool_templates/db.yaml + hooks: pre-create + nested_stack: + nested_resource: + hooks: pre-update + another_resource: + hooks: [pre-create, pre-update] +` + +// InvalidEnvironment is an invalid environment as it has an extra section called `resources` +const InvalidEnvironment = ` +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: debian2G + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) +parameter_defaults: + KeyName: heat_key +` + +// ValidJSONEnvironmentParsed is the expected parsed version of ValidJSONEnvironment +var ValidJSONEnvironmentParsed = map[string]interface{}{ + "parameters": map[string]interface{}{ + "user_key": "userkey", + }, + "resource_registry": map[string]interface{}{ + "My::WP::Server": "file:///home/shardy/git/heat-templates/hot/F18/WordPress_Native.yaml", + "OS::Quantum*": "OS::Neutron*", + "AWS::CloudWatch::Alarm": "file:///etc/heat/templates/AWS_CloudWatch_Alarm.yaml", + "OS::Metering::Alarm": "OS::Ceilometer::Alarm", + "AWS::RDS::DBInstance": "file:///etc/heat/templates/AWS_RDS_DBInstance.yaml", + "resources": map[string]interface{}{ + "my_db_server": map[string]interface{}{ + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml", + }, + "my_server": map[string]interface{}{ + "OS::DBInstance": "file:///home/mine/all_my_cool_templates/db.yaml", + "hooks": "pre-create", + }, + "nested_stack": map[string]interface{}{ + "nested_resource": map[string]interface{}{ + "hooks": "pre-update", + }, + "another_resource": map[string]interface{}{ + "hooks": []interface{}{ + "pre-create", + "pre-update", + }, + }, + }, + }, + }, +} + +// ValidJSONTemplateParsed is the expected parsed version of 
ValidJSONTemplate +var ValidJSONTemplateParsed = map[string]interface{}{ + "heat_template_version": "2014-10-16", + "parameters": map[string]interface{}{ + "flavor": map[string]interface{}{ + "default": "debian2G", + "description": "Flavor for the server to be created", + "hidden": true, + "type": "string", + }, + }, + "resources": map[string]interface{}{ + "test_server": map[string]interface{}{ + "properties": map[string]interface{}{ + "flavor": "2 GB General Purpose v1", + "image": "Debian 7 (Wheezy) (PVHVM)", + "name": "test-server", + }, + "type": "OS::Nova::Server", + }, + }, +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/requests.go new file mode 100644 index 000000000000..5df222ff0c6b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/requests.go @@ -0,0 +1,476 @@ +package stacks + +import ( + "strings" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the main Create operation in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type CreateOptsBuilder interface { + ToStackCreateMap() (map[string]interface{}, error) +} + +// CreateOpts is the common options struct used in this package's Create +// operation. +type CreateOpts struct { + // The name of the stack. It must start with an alphabetic character. + Name string `json:"stack_name" required:"true"` + // A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + TemplateOpts *Template `json:"-" required:"true"` + // Enables or disables deletion of all stack resources when a stack + // creation fails. Default is true, meaning all resources are not deleted when + // stack creation fails. + DisableRollback *bool `json:"disable_rollback,omitempty"` + // A structure that contains details for the environment of the stack. + EnvironmentOpts *Environment `json:"-"` + // User-defined parameters to pass to the template. + Parameters map[string]string `json:"parameters,omitempty"` + // The timeout for stack creation in minutes. + Timeout int `json:"timeout_mins,omitempty"` + // A list of tags to assosciate with the Stack + Tags []string `json:"-"` +} + +// ToStackCreateMap casts a CreateOpts struct to a map. 
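+//
+// A minimal usage sketch (the stack name and timeout are placeholder values;
+// `tmpl` is assumed to be a *Template whose Bin already holds a valid Heat
+// template):
+//
+//    opts := CreateOpts{
+//        Name:            "demo-stack",
+//        Timeout:         60,
+//        TemplateOpts:    tmpl,
+//        DisableRollback: gophercloud.Disabled,
+//    }
+//    b, err := opts.ToStackCreateMap()
+//    if err != nil {
+//        // handle error
+//    }
+//    // b is the JSON body that Create posts to the stacks endpoint.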
+func (opts CreateOpts) ToStackCreateMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if err := opts.TemplateOpts.Parse(); err != nil { + return nil, err + } + + if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil { + return nil, err + } + opts.TemplateOpts.fixFileRefs() + b["template"] = string(opts.TemplateOpts.Bin) + + files := make(map[string]string) + for k, v := range opts.TemplateOpts.Files { + files[k] = v + } + + if opts.EnvironmentOpts != nil { + if err := opts.EnvironmentOpts.Parse(); err != nil { + return nil, err + } + if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil { + return nil, err + } + opts.EnvironmentOpts.fixFileRefs() + for k, v := range opts.EnvironmentOpts.Files { + files[k] = v + } + b["environment"] = string(opts.EnvironmentOpts.Bin) + } + + if len(files) > 0 { + b["files"] = files + } + + if opts.Tags != nil { + b["tags"] = strings.Join(opts.Tags, ",") + } + + return b, nil +} + +// Create accepts a CreateOpts struct and creates a new stack using the values +// provided. +func Create(c *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToStackCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(createURL(c), b, &r.Body, nil) + return +} + +// AdoptOptsBuilder is the interface options structs have to satisfy in order +// to be used in the Adopt function in this package. Since many +// extensions decorate or modify the common logic, it is useful for them to +// satisfy a basic interface in order for them to be used. +type AdoptOptsBuilder interface { + ToStackAdoptMap() (map[string]interface{}, error) +} + +// AdoptOpts is the common options struct used in this package's Adopt +// operation. +type AdoptOpts struct { + // Existing resources data represented as a string to add to the + // new stack. Data returned by Abandon could be provided as AdoptsStackData. + AdoptStackData string `json:"adopt_stack_data" required:"true"` + // The name of the stack. It must start with an alphabetic character. + Name string `json:"stack_name" required:"true"` + // A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + TemplateOpts *Template `json:"-" required:"true"` + // The timeout for stack creation in minutes. + Timeout int `json:"timeout_mins,omitempty"` + // A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + //TemplateOpts *Template `json:"-" required:"true"` + // Enables or disables deletion of all stack resources when a stack + // creation fails. Default is true, meaning all resources are not deleted when + // stack creation fails. + DisableRollback *bool `json:"disable_rollback,omitempty"` + // A structure that contains details for the environment of the stack. + EnvironmentOpts *Environment `json:"-"` + // User-defined parameters to pass to the template. + Parameters map[string]string `json:"parameters,omitempty"` +} + +// ToStackAdoptMap casts a CreateOpts struct to a map. 
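+//
+// A brief sketch (assumes `client` is a configured *gophercloud.ServiceClient,
+// `tmpl` is a parsed *Template, and `data` is the string produced by
+// AbandonResult.String from an earlier Abandon call):
+//
+//    opts := AdoptOpts{
+//        Name:           "adopted-stack",
+//        AdoptStackData: data,
+//        TemplateOpts:   tmpl,
+//        Timeout:        60,
+//    }
+//    created, err := Adopt(client, opts).Extract()
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = created.ID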
+func (opts AdoptOpts) ToStackAdoptMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if err := opts.TemplateOpts.Parse(); err != nil { + return nil, err + } + + if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil { + return nil, err + } + opts.TemplateOpts.fixFileRefs() + b["template"] = string(opts.TemplateOpts.Bin) + + files := make(map[string]string) + for k, v := range opts.TemplateOpts.Files { + files[k] = v + } + + if opts.EnvironmentOpts != nil { + if err := opts.EnvironmentOpts.Parse(); err != nil { + return nil, err + } + if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil { + return nil, err + } + opts.EnvironmentOpts.fixFileRefs() + for k, v := range opts.EnvironmentOpts.Files { + files[k] = v + } + b["environment"] = string(opts.EnvironmentOpts.Bin) + } + + if len(files) > 0 { + b["files"] = files + } + + return b, nil +} + +// Adopt accepts an AdoptOpts struct and creates a new stack using the resources +// from another stack. +func Adopt(c *gophercloud.ServiceClient, opts AdoptOptsBuilder) (r AdoptResult) { + b, err := opts.ToStackAdoptMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(adoptURL(c), b, &r.Body, nil) + return +} + +// SortDir is a type for specifying in which direction to sort a list of stacks. +type SortDir string + +// SortKey is a type for specifying by which key to sort a list of stacks. +type SortKey string + +var ( + // SortAsc is used to sort a list of stacks in ascending order. + SortAsc SortDir = "asc" + // SortDesc is used to sort a list of stacks in descending order. + SortDesc SortDir = "desc" + // SortName is used to sort a list of stacks by name. + SortName SortKey = "name" + // SortStatus is used to sort a list of stacks by status. + SortStatus SortKey = "status" + // SortCreatedAt is used to sort a list of stacks by date created. + SortCreatedAt SortKey = "created_at" + // SortUpdatedAt is used to sort a list of stacks by date updated. + SortUpdatedAt SortKey = "updated_at" +) + +// ListOptsBuilder allows extensions to add additional parameters to the +// List request. +type ListOptsBuilder interface { + ToStackListQuery() (string, error) +} + +// ListOpts allows the filtering and sorting of paginated collections through +// the API. Filtering is achieved by passing in struct field values that map to +// the network attributes you want to see returned. SortKey allows you to sort +// by a particular network attribute. SortDir sets the direction, and is either +// `asc' or `desc'. Marker and Limit are used for pagination. +type ListOpts struct { + Status string `q:"status"` + Name string `q:"name"` + Marker string `q:"marker"` + Limit int `q:"limit"` + SortKey SortKey `q:"sort_keys"` + SortDir SortDir `q:"sort_dir"` +} + +// ToStackListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToStackListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + if err != nil { + return "", err + } + return q.String(), nil +} + +// List returns a Pager which allows you to iterate over a collection of +// stacks. It accepts a ListOpts struct, which allows you to filter and sort +// the returned collection for greater efficiency. 
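+//
+// A short sketch of walking the pager (assumes `client` is a configured
+// *gophercloud.ServiceClient):
+//
+//    pager := List(client, ListOpts{Limit: 20, SortKey: SortCreatedAt, SortDir: SortDesc})
+//    err := pager.EachPage(func(page pagination.Page) (bool, error) {
+//        listed, err := ExtractStacks(page)
+//        if err != nil {
+//            return false, err
+//        }
+//        for _, stack := range listed {
+//            // use stack.Name, stack.Status, ...
+//        }
+//        return true, nil
+//    })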
+func List(c *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(c) + if opts != nil { + query, err := opts.ToStackListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + createPage := func(r pagination.PageResult) pagination.Page { + return StackPage{pagination.SinglePageBase(r)} + } + return pagination.NewPager(c, url, createPage) +} + +// Get retreives a stack based on the stack name and stack ID. +func Get(c *gophercloud.ServiceClient, stackName, stackID string) (r GetResult) { + _, r.Err = c.Get(getURL(c, stackName, stackID), &r.Body, nil) + return +} + +// UpdateOptsBuilder is the interface options structs have to satisfy in order +// to be used in the Update operation in this package. +type UpdateOptsBuilder interface { + ToStackUpdateMap() (map[string]interface{}, error) +} + +// UpdatePatchOptsBuilder is the interface options structs have to satisfy in order +// to be used in the UpdatePatch operation in this package +type UpdatePatchOptsBuilder interface { + ToStackUpdatePatchMap() (map[string]interface{}, error) +} + +// UpdateOpts contains the common options struct used in this package's Update +// and UpdatePatch operations. +type UpdateOpts struct { + // A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + TemplateOpts *Template `json:"-"` + // A structure that contains details for the environment of the stack. + EnvironmentOpts *Environment `json:"-"` + // User-defined parameters to pass to the template. + Parameters map[string]interface{} `json:"parameters,omitempty"` + // The timeout for stack creation in minutes. + Timeout int `json:"timeout_mins,omitempty"` + // A list of tags to associate with the Stack + Tags []string `json:"-"` +} + +// ToStackUpdateMap validates that a template was supplied and calls +// the toStackUpdateMap private function. +func (opts UpdateOpts) ToStackUpdateMap() (map[string]interface{}, error) { + if opts.TemplateOpts == nil { + return nil, ErrTemplateRequired{} + } + return toStackUpdateMap(opts) +} + +// ToStackUpdatePatchMap calls the private function toStackUpdateMap +// directly. +func (opts UpdateOpts) ToStackUpdatePatchMap() (map[string]interface{}, error) { + return toStackUpdateMap(opts) +} + +// ToStackUpdateMap casts a CreateOpts struct to a map. 
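+//
+// A brief sketch of how the two builders differ (assumes `client`, `tmpl`,
+// and `stackID` are already set up; the parameter values are placeholders):
+// Update requires TemplateOpts, UpdatePatch does not.
+//
+//    full := UpdateOpts{
+//        TemplateOpts: tmpl,
+//        Parameters:   map[string]interface{}{"flavor": "m1.tiny"},
+//    }
+//    err := Update(client, "my-stack", stackID, full).ExtractErr()
+//
+//    patch := UpdateOpts{Parameters: map[string]interface{}{"flavor": "m1.small"}}
+//    err = UpdatePatch(client, "my-stack", stackID, patch).ExtractErr()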
+func toStackUpdateMap(opts UpdateOpts) (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + files := make(map[string]string) + + if opts.TemplateOpts != nil { + if err := opts.TemplateOpts.Parse(); err != nil { + return nil, err + } + + if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil { + return nil, err + } + opts.TemplateOpts.fixFileRefs() + b["template"] = string(opts.TemplateOpts.Bin) + + for k, v := range opts.TemplateOpts.Files { + files[k] = v + } + } + + if opts.EnvironmentOpts != nil { + if err := opts.EnvironmentOpts.Parse(); err != nil { + return nil, err + } + if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil { + return nil, err + } + opts.EnvironmentOpts.fixFileRefs() + for k, v := range opts.EnvironmentOpts.Files { + files[k] = v + } + b["environment"] = string(opts.EnvironmentOpts.Bin) + } + + if len(files) > 0 { + b["files"] = files + } + + if opts.Tags != nil { + b["tags"] = strings.Join(opts.Tags, ",") + } + + return b, nil +} + +// Update accepts an UpdateOpts struct and updates an existing stack using the +// http PUT verb with the values provided. opts.TemplateOpts is required. +func Update(c *gophercloud.ServiceClient, stackName, stackID string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToStackUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Put(updateURL(c, stackName, stackID), b, nil, nil) + return +} + +// Update accepts an UpdateOpts struct and updates an existing stack using the +// http PATCH verb with the values provided. opts.TemplateOpts is not required. +func UpdatePatch(c *gophercloud.ServiceClient, stackName, stackID string, opts UpdatePatchOptsBuilder) (r UpdateResult) { + b, err := opts.ToStackUpdatePatchMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Patch(updateURL(c, stackName, stackID), b, nil, nil) + return +} + +// Delete deletes a stack based on the stack name and stack ID. +func Delete(c *gophercloud.ServiceClient, stackName, stackID string) (r DeleteResult) { + _, r.Err = c.Delete(deleteURL(c, stackName, stackID), nil) + return +} + +// PreviewOptsBuilder is the interface options structs have to satisfy in order +// to be used in the Preview operation in this package. +type PreviewOptsBuilder interface { + ToStackPreviewMap() (map[string]interface{}, error) +} + +// PreviewOpts contains the common options struct used in this package's Preview +// operation. +type PreviewOpts struct { + // The name of the stack. It must start with an alphabetic character. + Name string `json:"stack_name" required:"true"` + // The timeout for stack creation in minutes. + Timeout int `json:"timeout_mins" required:"true"` + // A structure that contains either the template file or url. Call the + // associated methods to extract the information relevant to send in a create request. + TemplateOpts *Template `json:"-" required:"true"` + // Enables or disables deletion of all stack resources when a stack + // creation fails. Default is true, meaning all resources are not deleted when + // stack creation fails. + DisableRollback *bool `json:"disable_rollback,omitempty"` + // A structure that contains details for the environment of the stack. + EnvironmentOpts *Environment `json:"-"` + // User-defined parameters to pass to the template. 
+ Parameters map[string]string `json:"parameters,omitempty"` +} + +// ToStackPreviewMap casts a PreviewOpts struct to a map. +func (opts PreviewOpts) ToStackPreviewMap() (map[string]interface{}, error) { + b, err := gophercloud.BuildRequestBody(opts, "") + if err != nil { + return nil, err + } + + if err := opts.TemplateOpts.Parse(); err != nil { + return nil, err + } + + if err := opts.TemplateOpts.getFileContents(opts.TemplateOpts.Parsed, ignoreIfTemplate, true); err != nil { + return nil, err + } + opts.TemplateOpts.fixFileRefs() + b["template"] = string(opts.TemplateOpts.Bin) + + files := make(map[string]string) + for k, v := range opts.TemplateOpts.Files { + files[k] = v + } + + if opts.EnvironmentOpts != nil { + if err := opts.EnvironmentOpts.Parse(); err != nil { + return nil, err + } + if err := opts.EnvironmentOpts.getRRFileContents(ignoreIfEnvironment); err != nil { + return nil, err + } + opts.EnvironmentOpts.fixFileRefs() + for k, v := range opts.EnvironmentOpts.Files { + files[k] = v + } + b["environment"] = string(opts.EnvironmentOpts.Bin) + } + + if len(files) > 0 { + b["files"] = files + } + + return b, nil +} + +// Preview accepts a PreviewOptsBuilder interface and creates a preview of a stack using the values +// provided. +func Preview(c *gophercloud.ServiceClient, opts PreviewOptsBuilder) (r PreviewResult) { + b, err := opts.ToStackPreviewMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(previewURL(c), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Abandon deletes the stack with the provided stackName and stackID, but leaves its +// resources intact, and returns data describing the stack and its resources. +func Abandon(c *gophercloud.ServiceClient, stackName, stackID string) (r AbandonResult) { + _, r.Err = c.Delete(abandonURL(c, stackName, stackID), &gophercloud.RequestOpts{ + JSONResponse: &r.Body, + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/results.go new file mode 100644 index 000000000000..8df541940ec0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/results.go @@ -0,0 +1,238 @@ +package stacks + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreatedStack represents the object extracted from a Create operation. +type CreatedStack struct { + ID string `json:"id"` + Links []gophercloud.Link `json:"links"` +} + +// CreateResult represents the result of a Create operation. +type CreateResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a CreatedStack object and is called after a +// Create operation. +func (r CreateResult) Extract() (*CreatedStack, error) { + var s struct { + CreatedStack *CreatedStack `json:"stack"` + } + err := r.ExtractInto(&s) + return s.CreatedStack, err +} + +// AdoptResult represents the result of an Adopt operation. AdoptResult has the +// same form as CreateResult. +type AdoptResult struct { + CreateResult +} + +// StackPage is a pagination.Pager that is returned from a call to the List function. +type StackPage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no Stacks. 
+func (r StackPage) IsEmpty() (bool, error) { + stacks, err := ExtractStacks(r) + return len(stacks) == 0, err +} + +// ListedStack represents an element in the slice extracted from a List operation. +type ListedStack struct { + CreationTime time.Time `json:"-"` + Description string `json:"description"` + ID string `json:"id"` + Links []gophercloud.Link `json:"links"` + Name string `json:"stack_name"` + Status string `json:"stack_status"` + StatusReason string `json:"stack_status_reason"` + Tags []string `json:"tags"` + UpdatedTime time.Time `json:"-"` +} + +func (r *ListedStack) UnmarshalJSON(b []byte) error { + type tmp ListedStack + var s struct { + tmp + CreationTime gophercloud.JSONRFC3339NoZ `json:"creation_time"` + UpdatedTime gophercloud.JSONRFC3339NoZ `json:"updated_time"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ListedStack(s.tmp) + + r.CreationTime = time.Time(s.CreationTime) + r.UpdatedTime = time.Time(s.UpdatedTime) + + return nil +} + +// ExtractStacks extracts and returns a slice of ListedStack. It is used while iterating +// over a stacks.List call. +func ExtractStacks(r pagination.Page) ([]ListedStack, error) { + var s struct { + ListedStacks []ListedStack `json:"stacks"` + } + err := (r.(StackPage)).ExtractInto(&s) + return s.ListedStacks, err +} + +// RetrievedStack represents the object extracted from a Get operation. +type RetrievedStack struct { + Capabilities []interface{} `json:"capabilities"` + CreationTime time.Time `json:"-"` + Description string `json:"description"` + DisableRollback bool `json:"disable_rollback"` + ID string `json:"id"` + Links []gophercloud.Link `json:"links"` + NotificationTopics []interface{} `json:"notification_topics"` + Outputs []map[string]interface{} `json:"outputs"` + Parameters map[string]string `json:"parameters"` + Name string `json:"stack_name"` + Status string `json:"stack_status"` + StatusReason string `json:"stack_status_reason"` + Tags []string `json:"tags"` + TemplateDescription string `json:"template_description"` + Timeout int `json:"timeout_mins"` + UpdatedTime time.Time `json:"-"` +} + +func (r *RetrievedStack) UnmarshalJSON(b []byte) error { + type tmp RetrievedStack + var s struct { + tmp + CreationTime gophercloud.JSONRFC3339NoZ `json:"creation_time"` + UpdatedTime gophercloud.JSONRFC3339NoZ `json:"updated_time"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = RetrievedStack(s.tmp) + + r.CreationTime = time.Time(s.CreationTime) + r.UpdatedTime = time.Time(s.UpdatedTime) + + return nil +} + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a RetrievedStack object and is called after a +// Get operation. +func (r GetResult) Extract() (*RetrievedStack, error) { + var s struct { + Stack *RetrievedStack `json:"stack"` + } + err := r.ExtractInto(&s) + return s.Stack, err +} + +// UpdateResult represents the result of a Update operation. +type UpdateResult struct { + gophercloud.ErrResult +} + +// DeleteResult represents the result of a Delete operation. +type DeleteResult struct { + gophercloud.ErrResult +} + +// PreviewedStack represents the result of a Preview operation. 
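+//
+// A short sketch of obtaining one (assumes `client` is a configured
+// *gophercloud.ServiceClient and `opts` is a populated PreviewOpts):
+//
+//    previewed, err := Preview(client, opts).Extract()
+//    if err != nil {
+//        // handle error
+//    }
+//    _ = previewed.Resources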
+type PreviewedStack struct { + Capabilities []interface{} `json:"capabilities"` + CreationTime time.Time `json:"-"` + Description string `json:"description"` + DisableRollback bool `json:"disable_rollback"` + ID string `json:"id"` + Links []gophercloud.Link `json:"links"` + Name string `json:"stack_name"` + NotificationTopics []interface{} `json:"notification_topics"` + Parameters map[string]string `json:"parameters"` + Resources []interface{} `json:"resources"` + TemplateDescription string `json:"template_description"` + Timeout int `json:"timeout_mins"` + UpdatedTime time.Time `json:"-"` +} + +func (r *PreviewedStack) UnmarshalJSON(b []byte) error { + type tmp PreviewedStack + var s struct { + tmp + CreationTime gophercloud.JSONRFC3339NoZ `json:"creation_time"` + UpdatedTime gophercloud.JSONRFC3339NoZ `json:"updated_time"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = PreviewedStack(s.tmp) + + r.CreationTime = time.Time(s.CreationTime) + r.UpdatedTime = time.Time(s.UpdatedTime) + + return nil +} + +// PreviewResult represents the result of a Preview operation. +type PreviewResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a PreviewedStack object and is called after a +// Preview operation. +func (r PreviewResult) Extract() (*PreviewedStack, error) { + var s struct { + PreviewedStack *PreviewedStack `json:"stack"` + } + err := r.ExtractInto(&s) + return s.PreviewedStack, err +} + +// AbandonedStack represents the result of an Abandon operation. +type AbandonedStack struct { + Status string `json:"status"` + Name string `json:"name"` + Template map[string]interface{} `json:"template"` + Action string `json:"action"` + ID string `json:"id"` + Resources map[string]interface{} `json:"resources"` + Files map[string]string `json:"files"` + StackUserProjectID string `json:"stack_user_project_id"` + ProjectID string `json:"project_id"` + Environment map[string]interface{} `json:"environment"` +} + +// AbandonResult represents the result of an Abandon operation. +type AbandonResult struct { + gophercloud.Result +} + +// Extract returns a pointer to an AbandonedStack object and is called after an +// Abandon operation. +func (r AbandonResult) Extract() (*AbandonedStack, error) { + var s *AbandonedStack + err := r.ExtractInto(&s) + return s, err +} + +// String converts an AbandonResult to a string. This is useful to when passing +// the result of an Abandon operation to an AdoptOpts AdoptStackData field. +func (r AbandonResult) String() (string, error) { + out, err := json.Marshal(r) + return string(out), err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/template.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/template.go new file mode 100644 index 000000000000..4cf5aae41a87 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/template.go @@ -0,0 +1,141 @@ +package stacks + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// Template is a structure that represents OpenStack Heat templates +type Template struct { + TE +} + +// TemplateFormatVersions is a map containing allowed variations of the template format version +// Note that this contains the permitted variations of the _keys_ not the values. 
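+//
+// For example, Validate accepts any template whose top level carries one of
+// these keys (a minimal sketch using an inline YAML snippet):
+//
+//    t := new(Template)
+//    t.Bin = []byte("heat_template_version: 2014-10-16\nresources: {}")
+//    if err := t.Validate(); err != nil {
+//        // the template is missing a recognised format-version key
+//    }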
+var TemplateFormatVersions = map[string]bool{ + "HeatTemplateFormatVersion": true, + "heat_template_version": true, + "AWSTemplateFormatVersion": true, +} + +// Validate validates the contents of the Template +func (t *Template) Validate() error { + if t.Parsed == nil { + if err := t.Parse(); err != nil { + return err + } + } + var invalid string + for key := range t.Parsed { + if _, ok := TemplateFormatVersions[key]; ok { + return nil + } + invalid = key + } + return ErrInvalidTemplateFormatVersion{Version: invalid} +} + +// GetFileContents recursively parses a template to search for urls. These urls +// are assumed to point to other templates (known in OpenStack Heat as child +// templates). The contents of these urls are fetched and stored in the `Files` +// parameter of the template structure. This is the only way that a user can +// use child templates that are located in their filesystem; urls located on the +// web (e.g. on github or swift) can be fetched directly by Heat engine. +func (t *Template) getFileContents(te interface{}, ignoreIf igFunc, recurse bool) error { + // initialize template if empty + if t.Files == nil { + t.Files = make(map[string]string) + } + if t.fileMaps == nil { + t.fileMaps = make(map[string]string) + } + switch te.(type) { + // if te is a map + case map[string]interface{}, map[interface{}]interface{}: + teMap, err := toStringKeys(te) + if err != nil { + return err + } + for k, v := range teMap { + value, ok := v.(string) + if !ok { + // if the value is not a string, recursively parse that value + if err := t.getFileContents(v, ignoreIf, recurse); err != nil { + return err + } + } else if !ignoreIf(k, value) { + // at this point, the k, v pair has a reference to an external template. + // The assumption of heatclient is that value v is a reference + // to a file in the users environment + + // create a new child template + childTemplate := new(Template) + + // initialize child template + + // get the base location of the child template + baseURL, err := gophercloud.NormalizePathURL(t.baseURL, value) + if err != nil { + return err + } + childTemplate.baseURL = baseURL + childTemplate.client = t.client + + // fetch the contents of the child template + if err := childTemplate.Parse(); err != nil { + return err + } + + // process child template recursively if required. This is + // required if the child template itself contains references to + // other templates + if recurse { + if err := childTemplate.getFileContents(childTemplate.Parsed, ignoreIf, recurse); err != nil { + return err + } + } + // update parent template with current child templates' content. + // At this point, the child template has been parsed recursively. + t.fileMaps[value] = childTemplate.URL + t.Files[childTemplate.URL] = string(childTemplate.Bin) + + } + } + return nil + // if te is a slice, call the function on each element of the slice. 
+ case []interface{}: + teSlice := te.([]interface{}) + for i := range teSlice { + if err := t.getFileContents(teSlice[i], ignoreIf, recurse); err != nil { + return err + } + } + // if te is anything else, return + case string, bool, float64, nil, int: + return nil + default: + return gophercloud.ErrUnexpectedType{Actual: fmt.Sprintf("%v", reflect.TypeOf(te))} + } + return nil +} + +// function to choose keys whose values are other template files +func ignoreIfTemplate(key string, value interface{}) bool { + // key must be either `get_file` or `type` for value to be a URL + if key != "get_file" && key != "type" { + return true + } + // value must be a string + valueString, ok := value.(string) + if !ok { + return true + } + // `.template` and `.yaml` are allowed suffixes for template URLs when referred to by `type` + if key == "type" && !(strings.HasSuffix(valueString, ".template") || strings.HasSuffix(valueString, ".yaml")) { + return true + } + return false +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/template_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/template_test.go new file mode 100644 index 000000000000..1ad9fc9c717c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/template_test.go @@ -0,0 +1,148 @@ +package stacks + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTemplateValidation(t *testing.T) { + templateJSON := new(Template) + templateJSON.Bin = []byte(ValidJSONTemplate) + err := templateJSON.Validate() + th.AssertNoErr(t, err) + + templateYAML := new(Template) + templateYAML.Bin = []byte(ValidYAMLTemplate) + err = templateYAML.Validate() + th.AssertNoErr(t, err) + + templateInvalid := new(Template) + templateInvalid.Bin = []byte(InvalidTemplateNoVersion) + if err = templateInvalid.Validate(); err == nil { + t.Error("Template validation did not catch invalid template") + } +} + +func TestTemplateParsing(t *testing.T) { + templateJSON := new(Template) + templateJSON.Bin = []byte(ValidJSONTemplate) + err := templateJSON.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONTemplateParsed, templateJSON.Parsed) + + templateYAML := new(Template) + templateYAML.Bin = []byte(ValidJSONTemplate) + err = templateYAML.Parse() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, ValidJSONTemplateParsed, templateYAML.Parsed) + + templateInvalid := new(Template) + templateInvalid.Bin = []byte("Keep Austin Weird") + err = templateInvalid.Parse() + if err == nil { + t.Error("Template parsing did not catch invalid template") + } +} + +func TestIgnoreIfTemplate(t *testing.T) { + var keyValueTests = []struct { + key string + value interface{} + out bool + }{ + {"not_get_file", "afksdf", true}, + {"not_type", "sdfd", true}, + {"get_file", "shdfuisd", false}, + {"type", "dfsdfsd", true}, + {"type", "sdfubsduf.yaml", false}, + {"type", "sdfsdufs.template", false}, + {"type", "sdfsdf.file", true}, + {"type", map[string]string{"key": "value"}, true}, + } + var result bool + for _, kv := range keyValueTests { + result = ignoreIfTemplate(kv.key, kv.value) + if result != kv.out { + t.Errorf("key: %v, value: %v expected: %v, actual: %v", kv.key, kv.value, result, kv.out) + } + } +} + +func TestGetFileContents(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + baseurl, err := getBasePath() + th.AssertNoErr(t, err) + fakeURL := strings.Join([]string{baseurl, 
"my_nova.yaml"}, "/") + urlparsed, err := url.Parse(fakeURL) + th.AssertNoErr(t, err) + myNovaContent := `heat_template_version: 2014-10-16 +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: 4353 + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) + networks: + - {uuid: 11111111-1111-1111-1111-111111111111}` + th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, myNovaContent) + }) + + client := fakeClient{BaseClient: getHTTPClient()} + te := new(Template) + te.Bin = []byte(`heat_template_version: 2015-04-30 +resources: + my_server: + type: my_nova.yaml`) + te.client = client + + err = te.Parse() + th.AssertNoErr(t, err) + err = te.getFileContents(te.Parsed, ignoreIfTemplate, true) + th.AssertNoErr(t, err) + expectedFiles := map[string]string{ + "my_nova.yaml": `heat_template_version: 2014-10-16 +parameters: + flavor: + type: string + description: Flavor for the server to be created + default: 4353 + hidden: true +resources: + test_server: + type: "OS::Nova::Server" + properties: + name: test-server + flavor: 2 GB General Purpose v1 + image: Debian 7 (Wheezy) (PVHVM) + networks: + - {uuid: 11111111-1111-1111-1111-111111111111}`} + th.AssertEquals(t, expectedFiles["my_nova.yaml"], te.Files[fakeURL]) + te.fixFileRefs() + expectedParsed := map[string]interface{}{ + "heat_template_version": "2015-04-30", + "resources": map[string]interface{}{ + "my_server": map[string]interface{}{ + "type": fakeURL, + }, + }, + } + te.Parse() + th.AssertDeepEquals(t, expectedParsed, te.Parsed) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/doc.go new file mode 100644 index 000000000000..5b3703ea2fd9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/doc.go @@ -0,0 +1,2 @@ +// orchestration_stacks_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/fixtures.go new file mode 100644 index 000000000000..7c8a2f32d233 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/fixtures.go @@ -0,0 +1,420 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// CreateExpected represents the expected object from a Create request. +var CreateExpected = &stacks.CreatedStack{ + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Links: []gophercloud.Link{ + { + Href: "http://168.28.170.117:8004/v1/98606384f58drad0bhdb7d02779549ac/stacks/stackcreated/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Rel: "self", + }, + }, +} + +// CreateOutput represents the response body from a Create request. 
+const CreateOutput = ` +{ + "stack": { + "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "links": [ + { + "href": "http://168.28.170.117:8004/v1/98606384f58drad0bhdb7d02779549ac/stacks/stackcreated/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "rel": "self" + } + ] + } +}` + +// HandleCreateSuccessfully creates an HTTP handler at `/stacks` on the test handler mux +// that responds with a `Create` response. +func HandleCreateSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, output) + }) +} + +// ListExpected represents the expected object from a List request. +var ListExpected = []stacks.ListedStack{ + { + Description: "Simple template to test heat commands", + Links: []gophercloud.Link{ + { + Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Rel: "self", + }, + }, + StatusReason: "Stack CREATE completed successfully", + Name: "postman_stack", + CreationTime: time.Date(2015, 2, 3, 20, 7, 39, 0, time.UTC), + Status: "CREATE_COMPLETE", + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Tags: []string{"rackspace", "atx"}, + }, + { + Description: "Simple template to test heat commands", + Links: []gophercloud.Link{ + { + Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", + Rel: "self", + }, + }, + StatusReason: "Stack successfully updated", + Name: "gophercloud-test-stack-2", + CreationTime: time.Date(2014, 12, 11, 17, 39, 16, 0, time.UTC), + UpdatedTime: time.Date(2014, 12, 11, 17, 40, 37, 0, time.UTC), + Status: "UPDATE_COMPLETE", + ID: "db6977b2-27aa-4775-9ae7-6213212d4ada", + Tags: []string{"sfo", "satx"}, + }, +} + +// FullListOutput represents the response body from a List request without a marker. +const FullListOutput = ` +{ + "stacks": [ + { + "description": "Simple template to test heat commands", + "links": [ + { + "href": "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "rel": "self" + } + ], + "stack_status_reason": "Stack CREATE completed successfully", + "stack_name": "postman_stack", + "creation_time": "2015-02-03T20:07:39", + "updated_time": null, + "stack_status": "CREATE_COMPLETE", + "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "tags": ["rackspace", "atx"] + }, + { + "description": "Simple template to test heat commands", + "links": [ + { + "href": "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", + "rel": "self" + } + ], + "stack_status_reason": "Stack successfully updated", + "stack_name": "gophercloud-test-stack-2", + "creation_time": "2014-12-11T17:39:16", + "updated_time": "2014-12-11T17:40:37", + "stack_status": "UPDATE_COMPLETE", + "id": "db6977b2-27aa-4775-9ae7-6213212d4ada", + "tags": ["sfo", "satx"] + } + ] +} +` + +// HandleListSuccessfully creates an HTTP handler at `/stacks` on the test handler mux +// that responds with a `List` response. 
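+//
+// Typical use from a test in this package, mirroring the request tests
+// further below (a sketch, not a complete test):
+//
+//    th.SetupHTTP()
+//    defer th.TeardownHTTP()
+//    HandleListSuccessfully(t, FullListOutput)
+//    pager := stacks.List(fake.ServiceClient(), nil)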
+func HandleListSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + r.ParseForm() + marker := r.Form.Get("marker") + switch marker { + case "": + fmt.Fprintf(w, output) + case "db6977b2-27aa-4775-9ae7-6213212d4ada": + fmt.Fprintf(w, `[]`) + default: + t.Fatalf("Unexpected marker: [%s]", marker) + } + }) +} + +// GetExpected represents the expected object from a Get request. +var GetExpected = &stacks.RetrievedStack{ + DisableRollback: true, + Description: "Simple template to test heat commands", + Parameters: map[string]string{ + "flavor": "m1.tiny", + "OS::stack_name": "postman_stack", + "OS::stack_id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + }, + StatusReason: "Stack CREATE completed successfully", + Name: "postman_stack", + Outputs: []map[string]interface{}{}, + CreationTime: time.Date(2015, 2, 3, 20, 7, 39, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Rel: "self", + }, + }, + Capabilities: []interface{}{}, + NotificationTopics: []interface{}{}, + Status: "CREATE_COMPLETE", + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + TemplateDescription: "Simple template to test heat commands", + Tags: []string{"rackspace", "atx"}, +} + +// GetOutput represents the response body from a Get request. +const GetOutput = ` +{ + "stack": { + "disable_rollback": true, + "description": "Simple template to test heat commands", + "parameters": { + "flavor": "m1.tiny", + "OS::stack_name": "postman_stack", + "OS::stack_id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87" + }, + "stack_status_reason": "Stack CREATE completed successfully", + "stack_name": "postman_stack", + "outputs": [], + "creation_time": "2015-02-03T20:07:39", + "links": [ + { + "href": "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "rel": "self" + } + ], + "capabilities": [], + "notification_topics": [], + "timeout_mins": null, + "stack_status": "CREATE_COMPLETE", + "updated_time": null, + "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "template_description": "Simple template to test heat commands", + "tags": ["rackspace", "atx"] + } +} +` + +// HandleGetSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87` +// on the test handler mux that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// HandleUpdateSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87` +// on the test handler mux that responds with an `Update` response. 
+func HandleUpdateSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleUpdatePatchSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87` +// on the test handler mux that responds with an `Update` response. +func HandleUpdatePatchSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PATCH") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +// HandleDeleteSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87` +// on the test handler mux that responds with a `Delete` response. +func HandleDeleteSuccessfully(t *testing.T) { + th.Mux.HandleFunc("/stacks/gophercloud-test-stack-2/db6977b2-27aa-4775-9ae7-6213212d4ada", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusNoContent) + }) +} + +// GetExpected represents the expected object from a Get request. +var PreviewExpected = &stacks.PreviewedStack{ + DisableRollback: true, + Description: "Simple template to test heat commands", + Parameters: map[string]string{ + "flavor": "m1.tiny", + "OS::stack_name": "postman_stack", + "OS::stack_id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + }, + Name: "postman_stack", + CreationTime: time.Date(2015, 2, 3, 20, 7, 39, 0, time.UTC), + Links: []gophercloud.Link{ + { + Href: "http://166.76.160.117:8004/v1/98606384f58d4ad0b3db7d0d779549ac/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Rel: "self", + }, + }, + Capabilities: []interface{}{}, + NotificationTopics: []interface{}{}, + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + TemplateDescription: "Simple template to test heat commands", +} + +// HandlePreviewSuccessfully creates an HTTP handler at `/stacks/preview` +// on the test handler mux that responds with a `Preview` response. +func HandlePreviewSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/preview", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// AbandonExpected represents the expected object from an Abandon request. 
+var AbandonExpected = &stacks.AbandonedStack{ + Status: "COMPLETE", + Name: "postman_stack", + Template: map[string]interface{}{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": map[string]interface{}{ + "flavor": map[string]interface{}{ + "default": "m1.tiny", + "type": "string", + }, + }, + "resources": map[string]interface{}{ + "hello_world": map[string]interface{}{ + "type": "OS::Nova::Server", + "properties": map[string]interface{}{ + "key_name": "heat_key", + "flavor": map[string]interface{}{ + "get_param": "flavor", + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n", + }, + }, + }, + }, + Action: "CREATE", + ID: "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + Resources: map[string]interface{}{ + "hello_world": map[string]interface{}{ + "status": "COMPLETE", + "name": "hello_world", + "resource_id": "8a310d36-46fc-436f-8be4-37a696b8ac63", + "action": "CREATE", + "type": "OS::Nova::Server", + }, + }, + Files: map[string]string{ + "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "heat_template_version: 2014-10-16\nparameters:\n flavor:\n type: string\n description: Flavor for the server to be created\n default: 4353\n hidden: true\nresources:\n test_server:\n type: \"OS::Nova::Server\"\n properties:\n name: test-server\n flavor: 2 GB General Purpose v1\n image: Debian 7 (Wheezy) (PVHVM)\n", + }, + StackUserProjectID: "897686", + ProjectID: "897686", + Environment: map[string]interface{}{ + "encrypted_param_names": make([]map[string]interface{}, 0), + "parameter_defaults": make(map[string]interface{}), + "parameters": make(map[string]interface{}), + "resource_registry": map[string]interface{}{ + "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml", + "resources": make(map[string]interface{}), + }, + }, +} + +// AbandonOutput represents the response body from an Abandon request. 
+const AbandonOutput = ` +{ + "status": "COMPLETE", + "name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + }, + "action": "CREATE", + "id": "16ef0584-4458-41eb-87c8-0dc8d5f66c87", + "resources": { + "hello_world": { + "status": "COMPLETE", + "name": "hello_world", + "resource_id": "8a310d36-46fc-436f-8be4-37a696b8ac63", + "action": "CREATE", + "type": "OS::Nova::Server" + } + }, + "files": { + "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "heat_template_version: 2014-10-16\nparameters:\n flavor:\n type: string\n description: Flavor for the server to be created\n default: 4353\n hidden: true\nresources:\n test_server:\n type: \"OS::Nova::Server\"\n properties:\n name: test-server\n flavor: 2 GB General Purpose v1\n image: Debian 7 (Wheezy) (PVHVM)\n" +}, + "environment": { + "encrypted_param_names": [], + "parameter_defaults": {}, + "parameters": {}, + "resource_registry": { + "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml": "file:///Users/prat8228/go/src/github.com/rackspace/rack/my_nova.yaml", + "resources": {} + } + }, + "stack_user_project_id": "897686", + "project_id": "897686" +}` + +// HandleAbandonSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87/abandon` +// on the test handler mux that responds with an `Abandon` response. 
+func HandleAbandonSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c8/abandon", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/requests_test.go new file mode 100644 index 000000000000..f8bd1fe20159 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/testing/requests_test.go @@ -0,0 +1,224 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t, CreateOutput) + template := new(stacks.Template) + template.Bin = []byte(` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + } + }`) + createOpts := stacks.CreateOpts{ + Name: "stackcreated", + Timeout: 60, + TemplateOpts: template, + DisableRollback: gophercloud.Disabled, + } + actual, err := stacks.Create(fake.ServiceClient(), createOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAdoptStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleCreateSuccessfully(t, CreateOutput) + template := new(stacks.Template) + template.Bin = []byte(` +{ + "stack_name": "postman_stack", + "template": { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type":"OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + } +}`) + adoptOpts := stacks.AdoptOpts{ + AdoptStackData: `{environment{parameters{}}}`, + Name: "stackcreated", + Timeout: 60, + TemplateOpts: template, + DisableRollback: gophercloud.Disabled, + } + actual, err := stacks.Adopt(fake.ServiceClient(), adoptOpts).Extract() + th.AssertNoErr(t, err) + + expected := CreateExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestListStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleListSuccessfully(t, FullListOutput) + + count := 0 + err := stacks.List(fake.ServiceClient(), nil).EachPage(func(page pagination.Page) (bool, error) { + count++ + actual, err := stacks.ExtractStacks(page) + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, ListExpected, actual) + + return true, nil + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, count, 1) +} + +func TestGetStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + 
+ actual, err := stacks.Get(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestUpdateStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + template := new(stacks.Template) + template.Bin = []byte(` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + } + }`) + updateOpts := &stacks.UpdateOpts{ + TemplateOpts: template, + } + err := stacks.Update(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestUpdateStackNoTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdateSuccessfully(t) + + parameters := make(map[string]interface{}) + parameters["flavor"] = "m1.tiny" + + updateOpts := &stacks.UpdateOpts{ + Parameters: parameters, + } + expected := stacks.ErrTemplateRequired{} + + err := stacks.Update(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertEquals(t, expected, err) +} + +func TestUpdatePatchStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleUpdatePatchSuccessfully(t) + + parameters := make(map[string]interface{}) + parameters["flavor"] = "m1.tiny" + + updateOpts := &stacks.UpdateOpts{ + Parameters: parameters, + } + err := stacks.UpdatePatch(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada", updateOpts).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestDeleteStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleDeleteSuccessfully(t) + + err := stacks.Delete(fake.ServiceClient(), "gophercloud-test-stack-2", "db6977b2-27aa-4775-9ae7-6213212d4ada").ExtractErr() + th.AssertNoErr(t, err) +} + +func TestPreviewStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandlePreviewSuccessfully(t, GetOutput) + + template := new(stacks.Template) + template.Bin = []byte(` + { + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + } + }`) + previewOpts := stacks.PreviewOpts{ + Name: "stackcreated", + Timeout: 60, + TemplateOpts: template, + DisableRollback: gophercloud.Disabled, + } + actual, err := stacks.Preview(fake.ServiceClient(), previewOpts).Extract() + th.AssertNoErr(t, err) + + expected := PreviewExpected + th.AssertDeepEquals(t, expected, actual) +} + +func TestAbandonStack(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleAbandonSuccessfully(t, AbandonOutput) + + actual, err := stacks.Abandon(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c8").Extract() + th.AssertNoErr(t, err) + + expected := AbandonExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/urls.go new file mode 100644 index 000000000000..b00be54e2ae4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/urls.go @@ -0,0 +1,35 @@ +package stacks + +import "github.com/gophercloud/gophercloud" + +func createURL(c 
*gophercloud.ServiceClient) string { + return c.ServiceURL("stacks") +} + +func adoptURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func getURL(c *gophercloud.ServiceClient, name, id string) string { + return c.ServiceURL("stacks", name, id) +} + +func updateURL(c *gophercloud.ServiceClient, name, id string) string { + return getURL(c, name, id) +} + +func deleteURL(c *gophercloud.ServiceClient, name, id string) string { + return getURL(c, name, id) +} + +func previewURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("stacks", "preview") +} + +func abandonURL(c *gophercloud.ServiceClient, name, id string) string { + return c.ServiceURL("stacks", name, id, "abandon") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/utils.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/utils.go new file mode 100644 index 000000000000..71d9e351502b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/utils.go @@ -0,0 +1,160 @@ +package stacks + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "path/filepath" + "reflect" + "strings" + + "github.com/gophercloud/gophercloud" + "gopkg.in/yaml.v2" +) + +// Client is an interface that expects a Get method similar to http.Get. This +// is needed for unit testing, since we can mock an http client. Thus, the +// client will usually be an http.Client EXCEPT in unit tests. +type Client interface { + Get(string) (*http.Response, error) +} + +// TE is a base structure for both Template and Environment +type TE struct { + // Bin stores the contents of the template or environment. + Bin []byte + // URL stores the URL of the template. This is allowed to be a 'file://' + // for local files. + URL string + // Parsed contains a parsed version of Bin. Since there are 2 different + // fields referring to the same value, you must be careful when accessing + // this filed. + Parsed map[string]interface{} + // Files contains a mapping between the urls in templates to their contents. + Files map[string]string + // fileMaps is a map used internally when determining Files. + fileMaps map[string]string + // baseURL represents the location of the template or environment file. + baseURL string + // client is an interface which allows TE to fetch contents from URLS + client Client +} + +// Fetch fetches the contents of a TE from its URL. Once a TE structure has a +// URL, call the fetch method to fetch the contents. +func (t *TE) Fetch() error { + // if the baseURL is not provided, use the current directors as the base URL + if t.baseURL == "" { + u, err := getBasePath() + if err != nil { + return err + } + t.baseURL = u + } + + // if the contents are already present, do nothing. + if t.Bin != nil { + return nil + } + + // get a fqdn from the URL using the baseURL of the TE. For local files, + // the URL's will have the `file` scheme. 
+ u, err := gophercloud.NormalizePathURL(t.baseURL, t.URL) + if err != nil { + return err + } + t.URL = u + + // get an HTTP client if none present + if t.client == nil { + t.client = getHTTPClient() + } + + // use the client to fetch the contents of the TE + resp, err := t.client.Get(t.URL) + if err != nil { + return err + } + defer resp.Body.Close() + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return err + } + t.Bin = body + return nil +} + +// get the basepath of the TE +func getBasePath() (string, error) { + basePath, err := filepath.Abs(".") + if err != nil { + return "", err + } + u, err := gophercloud.NormalizePathURL("", basePath) + if err != nil { + return "", err + } + return u, nil +} + +// get a an HTTP client to retrieve URL's. This client allows the use of `file` +// scheme since we may need to fetch files from users filesystem +func getHTTPClient() Client { + transport := &http.Transport{} + transport.RegisterProtocol("file", http.NewFileTransport(http.Dir("/"))) + return &http.Client{Transport: transport} +} + +// Parse will parse the contents and then validate. The contents MUST be either JSON or YAML. +func (t *TE) Parse() error { + if err := t.Fetch(); err != nil { + return err + } + if jerr := json.Unmarshal(t.Bin, &t.Parsed); jerr != nil { + if yerr := yaml.Unmarshal(t.Bin, &t.Parsed); yerr != nil { + return ErrInvalidDataFormat{} + } + } + return t.Validate() +} + +// Validate validates the contents of TE +func (t *TE) Validate() error { + return nil +} + +// igfunc is a parameter used by GetFileContents and GetRRFileContents to check +// for valid URL's. +type igFunc func(string, interface{}) bool + +// convert map[interface{}]interface{} to map[string]interface{} +func toStringKeys(m interface{}) (map[string]interface{}, error) { + switch m.(type) { + case map[string]interface{}, map[interface{}]interface{}: + typedMap := make(map[string]interface{}) + if _, ok := m.(map[interface{}]interface{}); ok { + for k, v := range m.(map[interface{}]interface{}) { + typedMap[k.(string)] = v + } + } else { + typedMap = m.(map[string]interface{}) + } + return typedMap, nil + default: + return nil, gophercloud.ErrUnexpectedType{Expected: "map[string]interface{}/map[interface{}]interface{}", Actual: fmt.Sprintf("%v", reflect.TypeOf(m))} + } +} + +// fix the reference to files by replacing relative URL's by absolute +// URL's +func (t *TE) fixFileRefs() { + tStr := string(t.Bin) + if t.fileMaps == nil { + return + } + for k, v := range t.fileMaps { + tStr = strings.Replace(tStr, k, v, -1) + } + t.Bin = []byte(tStr) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/utils_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/utils_test.go new file mode 100644 index 000000000000..b64e4dcef5d8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacks/utils_test.go @@ -0,0 +1,94 @@ +package stacks + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestTEFixFileRefs(t *testing.T) { + te := TE{ + Bin: []byte(`string_to_replace: my fair lady`), + fileMaps: map[string]string{ + "string_to_replace": "london bridge is falling down", + }, + } + te.fixFileRefs() + th.AssertEquals(t, string(te.Bin), `london bridge is falling down: my fair lady`) +} + +func TestToStringKeys(t *testing.T) { + var test1 interface{} = map[interface{}]interface{}{ + "Adam": "Smith", + "Isaac": "Newton", + } 
+ result1, err := toStringKeys(test1) + th.AssertNoErr(t, err) + + expected := map[string]interface{}{ + "Adam": "Smith", + "Isaac": "Newton", + } + th.AssertDeepEquals(t, result1, expected) +} + +func TestGetBasePath(t *testing.T) { + _, err := getBasePath() + th.AssertNoErr(t, err) +} + +// test if HTTP client can read file type URLS. Read the URL of this file +// because if this test is running, it means this file _must_ exist +func TestGetHTTPClient(t *testing.T) { + client := getHTTPClient() + baseurl, err := getBasePath() + th.AssertNoErr(t, err) + resp, err := client.Get(baseurl) + th.AssertNoErr(t, err) + th.AssertEquals(t, resp.StatusCode, 200) +} + +// Implement a fakeclient that can be used to mock out HTTP requests +type fakeClient struct { + BaseClient Client +} + +// this client's Get method first changes the URL given to point to +// testhelper's (th) endpoints. This is done because the http Mux does not seem +// to work for fqdns with the `file` scheme +func (c fakeClient) Get(url string) (*http.Response, error) { + newurl := strings.Replace(url, "file://", th.Endpoint(), 1) + return c.BaseClient.Get(newurl) +} + +// test the fetch function +func TestFetch(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + baseurl, err := getBasePath() + th.AssertNoErr(t, err) + fakeURL := strings.Join([]string{baseurl, "file.yaml"}, "/") + urlparsed, err := url.Parse(fakeURL) + th.AssertNoErr(t, err) + + th.Mux.HandleFunc(urlparsed.Path, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + w.Header().Set("Content-Type", "application/jason") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Fee-fi-fo-fum") + }) + + client := fakeClient{BaseClient: getHTTPClient()} + te := TE{ + URL: "file.yaml", + client: client, + } + err = te.Fetch() + th.AssertNoErr(t, err) + th.AssertEquals(t, fakeURL, te.URL) + th.AssertEquals(t, "Fee-fi-fo-fum", string(te.Bin)) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go new file mode 100644 index 000000000000..5af0bd62a113 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/doc.go @@ -0,0 +1,8 @@ +// Package stacktemplates provides operations for working with Heat templates. +// A Cloud Orchestration template is a portable file, written in a user-readable +// language, that describes how a set of resources should be assembled and what +// software should be installed in order to produce a working stack. The template +// specifies what resources should be used, what attributes can be set, and other +// parameters that are critical to the successful, repeatable automation of a +// specific application stack. +package stacktemplates diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go new file mode 100644 index 000000000000..d248c24ff64f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/requests.go @@ -0,0 +1,39 @@ +package stacktemplates + +import "github.com/gophercloud/gophercloud" + +// Get retreives data for the given stack template. 
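+//
+// A hypothetical usage sketch showing how a caller might fetch a stack's
+// template (the authenticated service client, stack name, and stack ID are
+// assumed; Extract returns the template as indented JSON bytes):
+//
+//	tmpl, err := stacktemplates.Get(client, "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Printf("%s\n", tmpl)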
+func Get(c *gophercloud.ServiceClient, stackName, stackID string) (r GetResult) { + _, r.Err = c.Get(getURL(c, stackName, stackID), &r.Body, nil) + return +} + +// ValidateOptsBuilder describes struct types that can be accepted by the Validate call. +// The ValidateOpts struct in this package does. +type ValidateOptsBuilder interface { + ToStackTemplateValidateMap() (map[string]interface{}, error) +} + +// ValidateOpts specifies the template validation parameters. +type ValidateOpts struct { + Template string `json:"template" or:"TemplateURL"` + TemplateURL string `json:"template_url" or:"Template"` +} + +// ToStackTemplateValidateMap assembles a request body based on the contents of a ValidateOpts. +func (opts ValidateOpts) ToStackTemplateValidateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// Validate validates the given stack template. +func Validate(c *gophercloud.ServiceClient, opts ValidateOptsBuilder) (r ValidateResult) { + b, err := opts.ToStackTemplateValidateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = c.Post(validateURL(c), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/results.go new file mode 100644 index 000000000000..bca959b9c12d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/results.go @@ -0,0 +1,44 @@ +package stacktemplates + +import ( + "encoding/json" + + "github.com/gophercloud/gophercloud" +) + +// GetResult represents the result of a Get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract returns the JSON template and is called after a Get operation. +func (r GetResult) Extract() ([]byte, error) { + if r.Err != nil { + return nil, r.Err + } + template, err := json.MarshalIndent(r.Body, "", " ") + if err != nil { + return nil, err + } + return template, nil +} + +// ValidatedTemplate represents the parsed object returned from a Validate request. +type ValidatedTemplate struct { + Description string `json:"Description"` + Parameters map[string]interface{} `json:"Parameters"` + ParameterGroups map[string]interface{} `json:"ParameterGroups"` +} + +// ValidateResult represents the result of a Validate operation. +type ValidateResult struct { + gophercloud.Result +} + +// Extract returns a pointer to a ValidatedTemplate object and is called after a +// Validate operation. 
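+//
+// A hypothetical validation sketch (the authenticated service client and the
+// template body are assumed, not part of the upstream source):
+//
+//	opts := stacktemplates.ValidateOpts{
+//		Template: `{"heat_template_version": "2013-05-23", "description": "test", "resources": {}}`,
+//	}
+//	validated, err := stacktemplates.Validate(client, opts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(validated.Description)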
+func (r ValidateResult) Extract() (*ValidatedTemplate, error) { + var s *ValidatedTemplate + err := r.ExtractInto(&s) + return s, err +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/doc.go new file mode 100644 index 000000000000..43c6b0f72d78 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/doc.go @@ -0,0 +1,2 @@ +// orchestration_stacktemplates_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/fixtures.go new file mode 100644 index 000000000000..23ec57917265 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/fixtures.go @@ -0,0 +1,96 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +// GetExpected represents the expected object from a Get request. +var GetExpected = "{\n \"description\": \"Simple template to test heat commands\",\n \"heat_template_version\": \"2013-05-23\",\n \"parameters\": {\n \"flavor\": {\n \"default\": \"m1.tiny\",\n \"type\": \"string\"\n }\n },\n \"resources\": {\n \"hello_world\": {\n \"properties\": {\n \"flavor\": {\n \"get_param\": \"flavor\"\n },\n \"image\": \"ad091b52-742f-469e-8f3c-fd81cadf0743\",\n \"key_name\": \"heat_key\"\n },\n \"type\": \"OS::Nova::Server\"\n }\n }\n}" + +// GetOutput represents the response body from a Get request. +const GetOutput = ` +{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743" + } + } + } +}` + +// HandleGetSuccessfully creates an HTTP handler at `/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87/template` +// on the test handler mux that responds with a `Get` response. +func HandleGetSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/stacks/postman_stack/16ef0584-4458-41eb-87c8-0dc8d5f66c87/template", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} + +// ValidateExpected represents the expected object from a Validate request. +var ValidateExpected = &stacktemplates.ValidatedTemplate{ + Description: "Simple template to test heat commands", + Parameters: map[string]interface{}{ + "flavor": map[string]interface{}{ + "Default": "m1.tiny", + "Type": "String", + "NoEcho": "false", + "Description": "", + "Label": "flavor", + }, + }, +} + +// ValidateOutput represents the response body from a Validate request. 
+const ValidateOutput = ` +{ + "Description": "Simple template to test heat commands", + "Parameters": { + "flavor": { + "Default": "m1.tiny", + "Type": "String", + "NoEcho": "false", + "Description": "", + "Label": "flavor" + } + } +}` + +// HandleValidateSuccessfully creates an HTTP handler at `/validate` +// on the test handler mux that responds with a `Validate` response. +func HandleValidateSuccessfully(t *testing.T, output string) { + th.Mux.HandleFunc("/validate", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Accept", "application/json") + + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, output) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/requests_test.go new file mode 100644 index 000000000000..442bcb7a7f4f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/testing/requests_test.go @@ -0,0 +1,58 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestGetTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleGetSuccessfully(t, GetOutput) + + actual, err := stacktemplates.Get(fake.ServiceClient(), "postman_stack", "16ef0584-4458-41eb-87c8-0dc8d5f66c87").Extract() + th.AssertNoErr(t, err) + + expected := GetExpected + th.AssertDeepEquals(t, expected, string(actual)) +} + +func TestValidateTemplate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + HandleValidateSuccessfully(t, ValidateOutput) + + opts := stacktemplates.ValidateOpts{ + Template: `{ + "heat_template_version": "2013-05-23", + "description": "Simple template to test heat commands", + "parameters": { + "flavor": { + "default": "m1.tiny", + "type": "string" + } + }, + "resources": { + "hello_world": { + "type": "OS::Nova::Server", + "properties": { + "key_name": "heat_key", + "flavor": { + "get_param": "flavor" + }, + "image": "ad091b52-742f-469e-8f3c-fd81cadf0743", + "user_data": "#!/bin/bash -xv\necho \"hello world\" > /root/hello-world.txt\n" + } + } + } + }`, + } + actual, err := stacktemplates.Validate(fake.ServiceClient(), opts).Extract() + th.AssertNoErr(t, err) + + expected := ValidateExpected + th.AssertDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go new file mode 100644 index 000000000000..aed6b4b9de48 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/orchestration/v1/stacktemplates/urls.go @@ -0,0 +1,11 @@ +package stacktemplates + +import "github.com/gophercloud/gophercloud" + +func getURL(c *gophercloud.ServiceClient, stackName, stackID string) string { + return c.ServiceURL("stacks", stackName, stackID, "template") +} + +func validateURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("validate") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/doc.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/doc.go new file mode 100644 index 000000000000..841a9c578cdb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/doc.go @@ -0,0 +1,3 @@ +// Package apiversions provides information and interaction with the different +// API versions for the Shared File System service, code-named Manila. +package apiversions diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/errors.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/errors.go new file mode 100644 index 000000000000..8f0f7628def8 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/errors.go @@ -0,0 +1,23 @@ +package apiversions + +import ( + "fmt" +) + +// ErrVersionNotFound is the error when the requested API version +// could not be found. +type ErrVersionNotFound struct{} + +func (e ErrVersionNotFound) Error() string { + return fmt.Sprintf("Unable to find requested API version") +} + +// ErrMultipleVersionsFound is the error when a request for an API +// version returns multiple results. +type ErrMultipleVersionsFound struct { + Count int +} + +func (e ErrMultipleVersionsFound) Error() string { + return fmt.Sprintf("Found %d API versions", e.Count) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/requests.go new file mode 100644 index 000000000000..2e1f6639b575 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/requests.go @@ -0,0 +1,19 @@ +package apiversions + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List lists all the API versions available to end-users. +func List(c *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(c, listURL(c), func(r pagination.PageResult) pagination.Page { + return APIVersionPage{pagination.SinglePageBase(r)} + }) +} + +// Get will get a specific API version, specified by major ID. +func Get(client *gophercloud.ServiceClient, v string) (r GetResult) { + _, r.Err = client.Get(getURL(client, v), &r.Body, nil) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/results.go new file mode 100644 index 000000000000..60c1f1b3ab1b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/results.go @@ -0,0 +1,73 @@ +package apiversions + +import ( + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// APIVersion represents an API version for the Shared File System service. +type APIVersion struct { + // ID is the unique identifier of the API version. + ID string `json:"id"` + + // MinVersion is the minimum microversion supported. + MinVersion string `json:"min_version"` + + // Status is the API versions status. + Status string `json:"status"` + + // Updated is the date when the API was last updated. + Updated time.Time `json:"updated"` + + // Version is the maximum microversion supported. + Version string `json:"version"` +} + +// APIVersionPage is the page returned by a pager when traversing over a +// collection of API versions. 
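+//
+// A hypothetical usage sketch showing how a caller might walk the version
+// listing (the authenticated service client is assumed):
+//
+//	allPages, err := apiversions.List(client).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	versions, err := apiversions.ExtractAPIVersions(allPages)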
+type APIVersionPage struct { + pagination.SinglePageBase +} + +// IsEmpty checks whether an APIVersionPage struct is empty. +func (r APIVersionPage) IsEmpty() (bool, error) { + is, err := ExtractAPIVersions(r) + return len(is) == 0, err +} + +// ExtractAPIVersions takes a collection page, extracts all of the elements, +// and returns them a slice of APIVersion structs. It is effectively a cast. +func ExtractAPIVersions(r pagination.Page) ([]APIVersion, error) { + var s struct { + Versions []APIVersion `json:"versions"` + } + err := (r.(APIVersionPage)).ExtractInto(&s) + return s.Versions, err +} + +// GetResult represents the result of a get operation. +type GetResult struct { + gophercloud.Result +} + +// Extract is a function that accepts a result and extracts an API version resource. +func (r GetResult) Extract() (*APIVersion, error) { + var s struct { + Versions []APIVersion `json:"versions"` + } + err := r.ExtractInto(&s) + if err != nil { + return nil, err + } + + switch len(s.Versions) { + case 0: + return nil, ErrVersionNotFound{} + case 1: + return &s.Versions[0], nil + default: + return nil, ErrMultipleVersionsFound{Count: len(s.Versions)} + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/doc.go new file mode 100644 index 000000000000..12e4bda0f9ed --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/doc.go @@ -0,0 +1,2 @@ +// apiversions_v1 +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/fixtures.go new file mode 100644 index 000000000000..9707d62af274 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/fixtures.go @@ -0,0 +1,227 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ManilaAPIVersionResponse = ` +{ + "versions": [ + { + "id": "v2.0", + "links": [ + { + "href": "http://docs.openstack.org/", + "rel": "describedby", + "type": "text/html" + }, + { + "href": "http://localhost:8786/v2/", + "rel": "self" + } + ], + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.share+json;version=1" + } + ], + "min_version": "2.0", + "status": "CURRENT", + "updated": "2015-08-27T11:33:21Z", + "version": "2.32" + } + ] +} +` + +const ManilaAPIInvalidVersionResponse_1 = ` +{ + "versions": [ + ] +} +` + +const ManilaAPIInvalidVersionResponse_2 = ` +{ + "versions": [ + { + "id": "v2.0", + "links": [ + { + "href": "http://docs.openstack.org/", + "rel": "describedby", + "type": "text/html" + }, + { + "href": "http://localhost:8786/v2/", + "rel": "self" + } + ], + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.share+json;version=1" + } + ], + "min_version": "2.0", + "status": "CURRENT", + "updated": "2015-08-27T11:33:21Z", + "version": "2.32" + }, + { + "id": "v2.9", + "links": [ + { + "href": "http://docs.openstack.org/", + "rel": "describedby", + "type": "text/html" + }, + { + "href": "http://localhost:8786/v2/", + "rel": "self" + } + ], + 
"media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.share+json;version=1" + } + ], + "min_version": "2.9", + "status": "CURRENT", + "updated": "2015-08-27T11:33:21Z", + "version": "2.99" + } + ] +} +` + +const ManilaAllAPIVersionsResponse = ` +{ + "versions": [ + { + "id": "v1.0", + "links": [ + { + "href": "http://docs.openstack.org/", + "rel": "describedby", + "type": "text/html" + }, + { + "href": "http://localhost:8786/v1/", + "rel": "self" + } + ], + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.share+json;version=1" + } + ], + "min_version": "", + "status": "DEPRECATED", + "updated": "2015-08-27T11:33:21Z", + "version": "" + }, + { + "id": "v2.0", + "links": [ + { + "href": "http://docs.openstack.org/", + "rel": "describedby", + "type": "text/html" + }, + { + "href": "http://localhost:8786/v2/", + "rel": "self" + } + ], + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.share+json;version=1" + } + ], + "min_version": "2.0", + "status": "CURRENT", + "updated": "2015-08-27T11:33:21Z", + "version": "2.32" + } + ] +} +` + +var ManilaAPIVersion1Result = apiversions.APIVersion{ + ID: "v1.0", + Status: "DEPRECATED", + Updated: time.Date(2015, 8, 27, 11, 33, 21, 0, time.UTC), +} + +var ManilaAPIVersion2Result = apiversions.APIVersion{ + ID: "v2.0", + Status: "CURRENT", + Updated: time.Date(2015, 8, 27, 11, 33, 21, 0, time.UTC), + MinVersion: "2.0", + Version: "2.32", +} + +var ManilaAllAPIVersionResults = []apiversions.APIVersion{ + ManilaAPIVersion1Result, + ManilaAPIVersion2Result, +} + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ManilaAllAPIVersionsResponse) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ManilaAPIVersionResponse) + }) +} + +func MockGetNoResponse(t *testing.T) { + th.Mux.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ManilaAPIInvalidVersionResponse_1) + }) +} + +func MockGetMultipleResponses(t *testing.T) { + th.Mux.HandleFunc("/v2/", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ManilaAPIInvalidVersionResponse_2) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/requests_test.go new file mode 100644 index 000000000000..8b5501fd8e21 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/testing/requests_test.go @@ -0,0 +1,56 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions" 
+ th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestListAPIVersions(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allVersions, err := apiversions.List(client.ServiceClient()).AllPages() + th.AssertNoErr(t, err) + + actual, err := apiversions.ExtractAPIVersions(allVersions) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ManilaAllAPIVersionResults, actual) +} + +func TestGetAPIVersion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + actual, err := apiversions.Get(client.ServiceClient(), "v2").Extract() + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, ManilaAPIVersion2Result, *actual) +} + +func TestGetNoAPIVersion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetNoResponse(t) + + _, err := apiversions.Get(client.ServiceClient(), "v2").Extract() + th.AssertEquals(t, err.Error(), "Unable to find requested API version") +} + +func TestGetMultipleAPIVersion(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetMultipleResponses(t) + + _, err := apiversions.Get(client.ServiceClient(), "v2").Extract() + th.AssertEquals(t, err.Error(), "Found 2 API versions") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/urls.go new file mode 100644 index 000000000000..6a30ca91e27f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/apiversions/urls.go @@ -0,0 +1,20 @@ +package apiversions + +import ( + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +func getURL(c *gophercloud.ServiceClient, version string) string { + u, _ := url.Parse(c.ServiceURL("")) + u.Path = "/" + strings.TrimRight(version, "/") + "/" + return u.String() +} + +func listURL(c *gophercloud.ServiceClient) string { + u, _ := url.Parse(c.ServiceURL("")) + u.Path = "/" + return u.String() +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/requests.go new file mode 100644 index 000000000000..df10b856eba1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/requests.go @@ -0,0 +1,13 @@ +package availabilityzones + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// List will return the existing availability zones. 
+func List(client *gophercloud.ServiceClient) pagination.Pager { + return pagination.NewPager(client, listURL(client), func(r pagination.PageResult) pagination.Page { + return AvailabilityZonePage{pagination.SinglePageBase(r)} + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/results.go new file mode 100644 index 000000000000..83a76c1a8383 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/results.go @@ -0,0 +1,59 @@ +package availabilityzones + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// AvailabilityZone contains all the information associated with an OpenStack +// AvailabilityZone. +type AvailabilityZone struct { + // The availability zone ID. + ID string `json:"id"` + // The name of the availability zone. + Name string `json:"name"` + // The date and time stamp when the availability zone was created. + CreatedAt time.Time `json:"-"` + // The date and time stamp when the availability zone was updated. + UpdatedAt time.Time `json:"-"` +} + +type commonResult struct { + gophercloud.Result +} + +// ListResult contains the response body and error from a List request. +type AvailabilityZonePage struct { + pagination.SinglePageBase +} + +// ExtractAvailabilityZones will get the AvailabilityZone objects out of the shareTypeAccessResult object. +func ExtractAvailabilityZones(r pagination.Page) ([]AvailabilityZone, error) { + var a struct { + AvailabilityZone []AvailabilityZone `json:"availability_zones"` + } + err := (r.(AvailabilityZonePage)).ExtractInto(&a) + return a.AvailabilityZone, err +} + +func (r *AvailabilityZone) UnmarshalJSON(b []byte) error { + type tmp AvailabilityZone + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = AvailabilityZone(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/testing/fixtures.go new file mode 100644 index 000000000000..e5db8cda6620 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/testing/fixtures.go @@ -0,0 +1,32 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/os-availability-zone", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "availability_zones": [ + { + "name": "nova", + "created_at": "2015-09-18T09:50:55.000000", + "updated_at": null, + "id": "388c983d-258e-4a0e-b1ba-10da37d766db" + } + ] + }`) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/testing/requests_test.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/testing/requests_test.go new file mode 100644 index 000000000000..76c8574fc598 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/testing/requests_test.go @@ -0,0 +1,34 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// Verifies that availability zones can be listed correctly +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := availabilityzones.List(client.ServiceClient()).AllPages() + th.AssertNoErr(t, err) + actual, err := availabilityzones.ExtractAvailabilityZones(allPages) + th.AssertNoErr(t, err) + var nilTime time.Time + expected := []availabilityzones.AvailabilityZone{ + { + Name: "nova", + CreatedAt: time.Date(2015, 9, 18, 9, 50, 55, 0, time.UTC), + UpdatedAt: nilTime, + ID: "388c983d-258e-4a0e-b1ba-10da37d766db", + }, + } + + th.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/urls.go new file mode 100644 index 000000000000..fb4cdcf4e207 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/availabilityzones/urls.go @@ -0,0 +1,7 @@ +package availabilityzones + +import "github.com/gophercloud/gophercloud" + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("os-availability-zone") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/requests.go new file mode 100644 index 000000000000..8ef1ba1166a5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/requests.go @@ -0,0 +1,176 @@ +package securityservices + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +type SecurityServiceType string + +// Valid security service types +const ( + LDAP SecurityServiceType = "ldap" + Kerberos SecurityServiceType = "kerberos" + ActiveDirectory SecurityServiceType = "active_directory" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToSecurityServiceCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a SecurityService. This object is +// passed to the securityservices.Create function. For more information about +// these parameters, see the SecurityService object. +type CreateOpts struct { + // The security service type. 
A valid value is ldap, kerberos, or active_directory + Type SecurityServiceType `json:"type" required:"true"` + // The security service name + Name string `json:"name,omitempty"` + // The security service description + Description string `json:"description,omitempty"` + // The DNS IP address that is used inside the tenant network + DNSIP string `json:"dns_ip,omitempty"` + // The security service user or group name that is used by the tenant + User string `json:"user,omitempty"` + // The user password, if you specify a user + Password string `json:"password,omitempty"` + // The security service domain + Domain string `json:"domain,omitempty"` + // The security service host name or IP address + Server string `json:"server,omitempty"` +} + +// ToSecurityServicesCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToSecurityServiceCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_service") +} + +// Create will create a new SecurityService based on the values in CreateOpts. To +// extract the SecurityService object from the response, call the Extract method +// on the CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToSecurityServiceCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// Delete will delete the existing SecurityService with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToSecurityServiceListQuery() (string, error) +} + +// ListOpts holds options for listing SecurityServices. It is passed to the +// securityservices.List function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant security services. + AllTenants bool `q:"all_tenants"` + // The security service ID + ID string `q:"id"` + // The security service domain + Domain string `q:"domain"` + // The security service type. A valid value is ldap, kerberos, or active_directory + Type SecurityServiceType `q:"type"` + // The security service name + Name string `q:"name"` + // The DNS IP address that is used inside the tenant network + DNSIP string `q:"dns_ip"` + // The security service user or group name that is used by the tenant + User string `q:"user"` + // The security service host name or IP address + Server string `q:"server"` + // The ID of the share network using security services + ShareNetworkID string `q:"share_network_id"` +} + +// ToSecurityServiceListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToSecurityServiceListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns SecurityServices optionally limited by the conditions provided in ListOpts. 
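+//
+// A hypothetical usage sketch showing a filtered listing (the authenticated
+// service client and the filter value are assumed):
+//
+//	opts := &securityservices.ListOpts{Type: securityservices.Kerberos}
+//	allPages, err := securityservices.List(client, opts).AllPages()
+//	if err != nil {
+//		panic(err)
+//	}
+//	services, err := securityservices.ExtractSecurityServices(allPages)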
+func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToSecurityServiceListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return SecurityServicePage{pagination.SinglePageBase(r)} + }) +} + +// Get retrieves the SecurityService with the provided ID. To extract the SecurityService +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToSecurityServiceUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing SecurityService. This object is passed +// to the securityservices.Update function. For more information about the parameters, see +// the SecurityService object. +type UpdateOpts struct { + // The security service name + Name string `json:"name"` + // The security service description + Description string `json:"description,omitempty"` + // The security service type. A valid value is ldap, kerberos, or active_directory + Type string `json:"type,omitempty"` + // The DNS IP address that is used inside the tenant network + DNSIP string `json:"dns_ip,omitempty"` + // The security service user or group name that is used by the tenant + User string `json:"user,omitempty"` + // The user password, if you specify a user + Password string `json:"password,omitempty"` + // The security service domain + Domain string `json:"domain,omitempty"` + // The security service host name or IP address + Server string `json:"server,omitempty"` +} + +// ToSecurityServiceUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToSecurityServiceUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "security_service") +} + +// Update will update the SecurityService with provided information. To extract the updated +// SecurityService from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToSecurityServiceUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/results.go new file mode 100644 index 000000000000..ce18b8f76f30 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/results.go @@ -0,0 +1,113 @@ +package securityservices + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// SecurityService contains all the information associated with an OpenStack +// SecurityService. 
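+//
+// A hypothetical retrieval sketch (the service client and the ID are assumed;
+// the ID matches the one used in this package's test fixtures):
+//
+//	ss, err := securityservices.Get(client, "3c829734-0679-4c17-9637-801da48c0d5f").Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(ss.Name, ss.Type)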
+type SecurityService struct { + // The security service ID + ID string `json:"id"` + // The UUID of the project where the security service was created + ProjectID string `json:"project_id"` + // The security service domain + Domain string `json:"domain"` + // The security service status + Status string `json:"status"` + // The security service type. A valid value is ldap, kerberos, or active_directory + Type string `json:"type"` + // The security service name + Name string `json:"name"` + // The security service description + Description string `json:"description"` + // The DNS IP address that is used inside the tenant network + DNSIP string `json:"dns_ip"` + // The security service user or group name that is used by the tenant + User string `json:"user"` + // The user password, if you specify a user + Password string `json:"password"` + // The security service host name or IP address + Server string `json:"server"` + // The date and time stamp when the security service was created + CreatedAt time.Time `json:"-"` + // The date and time stamp when the security service was updated + UpdatedAt time.Time `json:"-"` +} + +func (r *SecurityService) UnmarshalJSON(b []byte) error { + type tmp SecurityService + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = SecurityService(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// SecurityServicePage is a pagination.pager that is returned from a call to the List function. +type SecurityServicePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no SecurityServices. +func (r SecurityServicePage) IsEmpty() (bool, error) { + securityServices, err := ExtractSecurityServices(r) + return len(securityServices) == 0, err +} + +// ExtractSecurityServices extracts and returns SecurityServices. It is used while +// iterating over a securityservices.List call. +func ExtractSecurityServices(r pagination.Page) ([]SecurityService, error) { + var s struct { + SecurityServices []SecurityService `json:"security_services"` + } + err := (r.(SecurityServicePage)).ExtractInto(&s) + return s.SecurityServices, err +} + +// Extract will get the SecurityService object out of the commonResult object. +func (r commonResult) Extract() (*SecurityService, error) { + var s struct { + SecurityService *SecurityService `json:"security_service"` + } + err := r.ExtractInto(&s) + return s.SecurityService, err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. 
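+//
+// A hypothetical update sketch (the service client and the ID are assumed):
+//
+//	opts := securityservices.UpdateOpts{Name: "SecServ2"}
+//	updated, err := securityservices.Update(client, "securityServiceID", opts).Extract()
+//	if err != nil {
+//		panic(err)
+//	}
+//	fmt.Println(updated.Name)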
+type UpdateResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/testing/fixtures.go new file mode 100644 index 000000000000..528c854537a1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/testing/fixtures.go @@ -0,0 +1,192 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/security-services", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "security_service": { + "description": "Creating my first Security Service", + "dns_ip": "10.0.0.0/24", + "user": "demo", + "password": "***", + "type": "kerberos", + "name": "SecServ1" + } + }`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_service": { + "status": "new", + "domain": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "name": "SecServ1", + "created_at": "2015-09-07T12:19:10.695211", + "updated_at": null, + "server": null, + "dns_ip": "10.0.0.0/24", + "user": "demo", + "password": "supersecret", + "type": "kerberos", + "id": "3c829734-0679-4c17-9637-801da48c0d5f", + "description": "Creating my first Security Service" + } + }`) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/security-services/securityServiceID", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/security-services/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "security_services": [ + { + "status": "new", + "domain": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "name": "SecServ1", + "created_at": "2015-09-07T12:19:10.000000", + "description": "Creating my first Security Service", + "updated_at": null, + "server": null, + "dns_ip": "10.0.0.0/24", + "user": "demo", + "password": "supersecret", + "type": "kerberos", + "id": "3c829734-0679-4c17-9637-801da48c0d5f" + }, + { + "status": "new", + "domain": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "name": "SecServ2", + "created_at": "2015-09-07T12:25:03.000000", + "description": "Creating my second Security Service", + "updated_at": null, + "server": null, + "dns_ip": "10.0.0.0/24", + "user": null, + "password": null, + "type": "ldap", + "id": "5a1d3a12-34a7-4087-8983-50e9ed03509a" + } + ] + }`) + }) +} + +func MockFilteredListResponse(t *testing.T) { + th.Mux.HandleFunc("/security-services/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + 
"security_services": [ + { + "status": "new", + "domain": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "name": "SecServ1", + "created_at": "2015-09-07T12:19:10.000000", + "description": "Creating my first Security Service", + "updated_at": null, + "server": null, + "dns_ip": "10.0.0.0/24", + "user": "demo", + "password": "supersecret", + "type": "kerberos", + "id": "3c829734-0679-4c17-9637-801da48c0d5f" + } + ] + }`) + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/security-services/3c829734-0679-4c17-9637-801da48c0d5f", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "security_service": { + "status": "new", + "domain": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "name": "SecServ1", + "created_at": "2015-09-07T12:19:10.000000", + "updated_at": null, + "server": null, + "dns_ip": "10.0.0.0/24", + "user": "demo", + "password": "supersecret", + "type": "kerberos", + "id": "3c829734-0679-4c17-9637-801da48c0d5f", + "description": "Creating my first Security Service" + } + }`) + }) +} + +func MockUpdateResponse(t *testing.T) { + th.Mux.HandleFunc("/security-services/securityServiceID", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "security_service": { + "status": "new", + "domain": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "name": "SecServ2", + "created_at": "2015-09-07T12:19:10.000000", + "updated_at": "2015-09-07T12:20:10.000000", + "server": null, + "dns_ip": "10.0.0.0/24", + "user": "demo", + "password": "supersecret", + "type": "kerberos", + "id": "securityServiceID", + "description": "Updating my first Security Service" + } + } + `) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/testing/requests_test.go new file mode 100644 index 000000000000..06d9153d3a8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/testing/requests_test.go @@ -0,0 +1,208 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// Verifies that a security service can be created correctly +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := &securityservices.CreateOpts{ + Name: "SecServ1", + Description: "Creating my first Security Service", + DNSIP: "10.0.0.0/24", + User: "demo", + Password: "***", + Type: "kerberos", + } + + s, err := securityservices.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, s.Name, "SecServ1") + th.AssertEquals(t, s.Description, "Creating my first Security Service") + th.AssertEquals(t, s.User, "demo") + th.AssertEquals(t, s.DNSIP, "10.0.0.0/24") + th.AssertEquals(t, s.Password, "supersecret") + th.AssertEquals(t, s.Type, "kerberos") +} + +// Verifies that a security service cannot be created without a type +func 
TestCreateFails(t *testing.T) { + options := &securityservices.CreateOpts{ + Name: "SecServ1", + Description: "Creating my first Security Service", + DNSIP: "10.0.0.0/24", + User: "demo", + Password: "***", + } + + _, err := securityservices.Create(client.ServiceClient(), options).Extract() + if _, ok := err.(gophercloud.ErrMissingInput); !ok { + t.Fatal("ErrMissingInput was expected to occur") + } +} + +// Verifies that security service deletion works +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := securityservices.Delete(client.ServiceClient(), "securityServiceID") + th.AssertNoErr(t, res.Err) +} + +// Verifies that security services can be listed correctly +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := securityservices.List(client.ServiceClient(), &securityservices.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := securityservices.ExtractSecurityServices(allPages) + th.AssertNoErr(t, err) + var nilTime time.Time + expected := []securityservices.SecurityService{ + { + Status: "new", + Domain: "", + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Name: "SecServ1", + CreatedAt: time.Date(2015, 9, 7, 12, 19, 10, 0, time.UTC), + Description: "Creating my first Security Service", + UpdatedAt: nilTime, + Server: "", + DNSIP: "10.0.0.0/24", + User: "demo", + Password: "supersecret", + Type: "kerberos", + ID: "3c829734-0679-4c17-9637-801da48c0d5f", + }, + { + Status: "new", + Domain: "", + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Name: "SecServ2", + CreatedAt: time.Date(2015, 9, 7, 12, 25, 03, 0, time.UTC), + Description: "Creating my second Security Service", + UpdatedAt: nilTime, + Server: "", + DNSIP: "10.0.0.0/24", + User: "", + Password: "", + Type: "ldap", + ID: "5a1d3a12-34a7-4087-8983-50e9ed03509a", + }, + } + + th.CheckDeepEquals(t, expected, actual) +} + +// Verifies that security services list can be called with query parameters +func TestFilteredList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockFilteredListResponse(t) + + options := &securityservices.ListOpts{ + Type: "kerberos", + } + + allPages, err := securityservices.List(client.ServiceClient(), options).AllPages() + th.AssertNoErr(t, err) + actual, err := securityservices.ExtractSecurityServices(allPages) + th.AssertNoErr(t, err) + var nilTime time.Time + expected := []securityservices.SecurityService{ + { + Status: "new", + Domain: "", + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Name: "SecServ1", + CreatedAt: time.Date(2015, 9, 7, 12, 19, 10, 0, time.UTC), + Description: "Creating my first Security Service", + UpdatedAt: nilTime, + Server: "", + DNSIP: "10.0.0.0/24", + User: "demo", + Password: "supersecret", + Type: "kerberos", + ID: "3c829734-0679-4c17-9637-801da48c0d5f", + }, + } + + th.CheckDeepEquals(t, expected, actual) +} + +// Verifies that it is possible to get a security service +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + var nilTime time.Time + expected := securityservices.SecurityService{ + ID: "3c829734-0679-4c17-9637-801da48c0d5f", + Name: "SecServ1", + CreatedAt: time.Date(2015, 9, 7, 12, 19, 10, 0, time.UTC), + Description: "Creating my first Security Service", + Type: "kerberos", + UpdatedAt: nilTime, + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Status: "new", + Domain: "", + Server: "", + DNSIP: "10.0.0.0/24", + User: "demo", + Password: "supersecret", + } + + n, 
err := securityservices.Get(client.ServiceClient(), "3c829734-0679-4c17-9637-801da48c0d5f").Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, &expected, n) +} + +// Verifies that it is possible to update a security service +func TestUpdate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateResponse(t) + expected := securityservices.SecurityService{ + ID: "securityServiceID", + Name: "SecServ2", + CreatedAt: time.Date(2015, 9, 7, 12, 19, 10, 0, time.UTC), + Description: "Updating my first Security Service", + Type: "kerberos", + UpdatedAt: time.Date(2015, 9, 7, 12, 20, 10, 0, time.UTC), + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Status: "new", + Domain: "", + Server: "", + DNSIP: "10.0.0.0/24", + User: "demo", + Password: "supersecret", + } + + options := securityservices.UpdateOpts{Name: "SecServ2"} + s, err := securityservices.Update(client.ServiceClient(), "securityServiceID", options).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, s) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/urls.go new file mode 100644 index 000000000000..c19b1f1062f5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/securityservices/urls.go @@ -0,0 +1,23 @@ +package securityservices + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("security-services") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("security-services", id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("security-services", "detail") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/requests.go new file mode 100644 index 000000000000..cdc026c01162 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/requests.go @@ -0,0 +1,233 @@ +package sharenetworks + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToShareNetworkCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a ShareNetwork. This object is +// passed to the sharenetworks.Create function. For more information about +// these parameters, see the ShareNetwork object. 
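+//
+// A minimal usage sketch (illustrative only, not part of the upstream source; it
+// assumes an authenticated *gophercloud.ServiceClient named "client" scoped to the
+// shared file systems v2 endpoint, and the UUIDs are placeholders):
+//
+//	opts := sharenetworks.CreateOpts{
+//		Name:            "my_network",
+//		Description:     "share network for NFS shares",
+//		NeutronNetID:    "998b42ee-2cee-4d36-8b95-67b5ca1f2109",
+//		NeutronSubnetID: "53482b62-2c84-4a53-b6ab-30d9d9800d06",
+//	}
+//	n, err := sharenetworks.Create(client, opts).Extract()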
+type CreateOpts struct { + // The UUID of the Neutron network to set up for share servers + NeutronNetID string `json:"neutron_net_id,omitempty"` + // The UUID of the Neutron subnet to set up for share servers + NeutronSubnetID string `json:"neutron_subnet_id,omitempty"` + // The UUID of the nova network to set up for share servers + NovaNetID string `json:"nova_net_id,omitempty"` + // The share network name + Name string `json:"name"` + // The share network description + Description string `json:"description"` +} + +// ToShareNetworkCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToShareNetworkCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share_network") +} + +// Create will create a new ShareNetwork based on the values in CreateOpts. To +// extract the ShareNetwork object from the response, call the Extract method +// on the CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToShareNetworkCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will delete the existing ShareNetwork with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToShareNetworkListQuery() (string, error) +} + +// ListOpts holds options for listing ShareNetworks. It is passed to the +// sharenetworks.List function. +type ListOpts struct { + // admin-only option. Set it to true to see all tenant share networks. + AllTenants bool `q:"all_tenants"` + // The UUID of the project where the share network was created + ProjectID string `q:"project_id"` + // The neutron network ID + NeutronNetID string `q:"neutron_net_id"` + // The neutron subnet ID + NeutronSubnetID string `q:"neutron_subnet_id"` + // The nova network ID + NovaNetID string `q:"nova_net_id"` + // The network type. A valid value is VLAN, VXLAN, GRE or flat + NetworkType string `q:"network_type"` + // The Share Network name + Name string `q:"name"` + // The Share Network description + Description string `q:"description"` + // The Share Network IP version + IPVersion gophercloud.IPVersion `q:"ip_version"` + // The Share Network segmentation ID + SegmentationID int `q:"segmentation_id"` + // List all share networks created after the given date + CreatedSince string `q:"created_since"` + // List all share networks created before the given date + CreatedBefore string `q:"created_before"` + // Limit specifies the page size. + Limit int `q:"limit"` + // Limit specifies the page number. + Offset int `q:"offset"` +} + +// ToShareNetworkListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToShareNetworkListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail returns ShareNetworks optionally limited by the conditions provided in ListOpts. 
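+//
+// Example sketch (assumes an authenticated *gophercloud.ServiceClient named
+// "client"; it mirrors the listing pattern exercised by the package tests):
+//
+//	allPages, err := sharenetworks.ListDetail(client, &sharenetworks.ListOpts{}).AllPages()
+//	if err == nil {
+//		networks, _ := sharenetworks.ExtractShareNetworks(allPages)
+//		// iterate over networks...
+//	}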
+func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToShareNetworkListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + p := ShareNetworkPage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// Get retrieves the ShareNetwork with the provided ID. To extract the ShareNetwork +// object from the response, call the Extract method on the GetResult. +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// UpdateOptsBuilder allows extensions to add additional parameters to the +// Update request. +type UpdateOptsBuilder interface { + ToShareNetworkUpdateMap() (map[string]interface{}, error) +} + +// UpdateOpts contain options for updating an existing ShareNetwork. This object is passed +// to the sharenetworks.Update function. For more information about the parameters, see +// the ShareNetwork object. +type UpdateOpts struct { + // The share network name + Name string `json:"name,omitempty"` + // The share network description + Description string `json:"description,omitempty"` + // The UUID of the Neutron network to set up for share servers + NeutronNetID string `json:"neutron_net_id,omitempty"` + // The UUID of the Neutron subnet to set up for share servers + NeutronSubnetID string `json:"neutron_subnet_id,omitempty"` + // The UUID of the nova network to set up for share servers + NovaNetID string `json:"nova_net_id,omitempty"` +} + +// ToShareNetworkUpdateMap assembles a request body based on the contents of an +// UpdateOpts. +func (opts UpdateOpts) ToShareNetworkUpdateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share_network") +} + +// Update will update the ShareNetwork with provided information. To extract the updated +// ShareNetwork from the response, call the Extract method on the UpdateResult. +func Update(client *gophercloud.ServiceClient, id string, opts UpdateOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareNetworkUpdateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Put(updateURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// AddSecurityServiceOptsBuilder allows extensions to add additional parameters to the +// AddSecurityService request. +type AddSecurityServiceOptsBuilder interface { + ToShareNetworkAddSecurityServiceMap() (map[string]interface{}, error) +} + +// AddSecurityServiceOpts contain options for adding a security service to an +// existing ShareNetwork. This object is passed to the sharenetworks.AddSecurityService +// function. For more information about the parameters, see the ShareNetwork object. +type AddSecurityServiceOpts struct { + SecurityServiceID string `json:"security_service_id"` +} + +// ToShareNetworkAddSecurityServiceMap assembles a request body based on the contents of an +// AddSecurityServiceOpts. +func (opts AddSecurityServiceOpts) ToShareNetworkAddSecurityServiceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "add_security_service") +} + +// AddSecurityService will add the security service to a ShareNetwork. To extract the updated +// ShareNetwork from the response, call the Extract method on the UpdateResult. 
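+//
+// Example sketch (assumes "client" is an authenticated v2 ServiceClient and
+// both IDs are placeholders):
+//
+//	opts := sharenetworks.AddSecurityServiceOpts{SecurityServiceID: "securityServiceID"}
+//	sn, err := sharenetworks.AddSecurityService(client, "shareNetworkID", opts).Extract()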
+func AddSecurityService(client *gophercloud.ServiceClient, id string, opts AddSecurityServiceOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareNetworkAddSecurityServiceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(addSecurityServiceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RemoveSecurityServiceOptsBuilder allows extensions to add additional parameters to the +// RemoveSecurityService request. +type RemoveSecurityServiceOptsBuilder interface { + ToShareNetworkRemoveSecurityServiceMap() (map[string]interface{}, error) +} + +// RemoveSecurityServiceOpts contain options for removing a security service from an +// existing ShareNetwork. This object is passed to the sharenetworks.RemoveSecurityService +// function. For more information about the parameters, see the ShareNetwork object. +type RemoveSecurityServiceOpts struct { + SecurityServiceID string `json:"security_service_id"` +} + +// ToShareNetworkRemoveSecurityServiceMap assembles a request body based on the contents of an +// RemoveSecurityServiceOpts. +func (opts RemoveSecurityServiceOpts) ToShareNetworkRemoveSecurityServiceMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "remove_security_service") +} + +// RemoveSecurityService will remove the security service from a ShareNetwork. To extract the updated +// ShareNetwork from the response, call the Extract method on the UpdateResult. +func RemoveSecurityService(client *gophercloud.ServiceClient, id string, opts RemoveSecurityServiceOptsBuilder) (r UpdateResult) { + b, err := opts.ToShareNetworkRemoveSecurityServiceMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(removeSecurityServiceURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/results.go new file mode 100644 index 000000000000..fdb72569532c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/results.go @@ -0,0 +1,182 @@ +package sharenetworks + +import ( + "encoding/json" + "net/url" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ShareNetwork contains all the information associated with an OpenStack +// ShareNetwork. +type ShareNetwork struct { + // The Share Network ID + ID string `json:"id"` + // The UUID of the project where the share network was created + ProjectID string `json:"project_id"` + // The neutron network ID + NeutronNetID string `json:"neutron_net_id"` + // The neutron subnet ID + NeutronSubnetID string `json:"neutron_subnet_id"` + // The nova network ID + NovaNetID string `json:"nova_net_id"` + // The network type. A valid value is VLAN, VXLAN, GRE or flat + NetworkType string `json:"network_type"` + // The segmentation ID + SegmentationID int `json:"segmentation_id"` + // The IP block from which to allocate the network, in CIDR notation + CIDR string `json:"cidr"` + // The IP version of the network. 
A valid value is 4 or 6 + IPVersion int `json:"ip_version"` + // The Share Network name + Name string `json:"name"` + // The Share Network description + Description string `json:"description"` + // The date and time stamp when the Share Network was created + CreatedAt time.Time `json:"-"` + // The date and time stamp when the Share Network was updated + UpdatedAt time.Time `json:"-"` +} + +func (r *ShareNetwork) UnmarshalJSON(b []byte) error { + type tmp ShareNetwork + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + UpdatedAt gophercloud.JSONRFC3339MilliNoZ `json:"updated_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = ShareNetwork(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + r.UpdatedAt = time.Time(s.UpdatedAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// ShareNetworkPage is a pagination.pager that is returned from a call to the List function. +type ShareNetworkPage struct { + pagination.MarkerPageBase +} + +// NextPageURL generates the URL for the page of results after this one. +func (r ShareNetworkPage) NextPageURL() (string, error) { + currentURL := r.URL + mark, err := r.Owner.LastMarker() + if err != nil { + return "", err + } + + q := currentURL.Query() + q.Set("offset", mark) + currentURL.RawQuery = q.Encode() + return currentURL.String(), nil +} + +// LastMarker returns the last offset in a ListResult. +func (r ShareNetworkPage) LastMarker() (string, error) { + maxInt := strconv.Itoa(int(^uint(0) >> 1)) + shareNetworks, err := ExtractShareNetworks(r) + if err != nil { + return maxInt, err + } + if len(shareNetworks) == 0 { + return maxInt, nil + } + + u, err := url.Parse(r.URL.String()) + if err != nil { + return maxInt, err + } + queryParams := u.Query() + offset := queryParams.Get("offset") + limit := queryParams.Get("limit") + + // Limit is not present, only one page required + if limit == "" { + return maxInt, nil + } + + iOffset := 0 + if offset != "" { + iOffset, err = strconv.Atoi(offset) + if err != nil { + return maxInt, err + } + } + iLimit, err := strconv.Atoi(limit) + if err != nil { + return maxInt, err + } + iOffset = iOffset + iLimit + offset = strconv.Itoa(iOffset) + + return offset, nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (r ShareNetworkPage) IsEmpty() (bool, error) { + shareNetworks, err := ExtractShareNetworks(r) + return len(shareNetworks) == 0, err +} + +// ExtractShareNetworks extracts and returns ShareNetworks. It is used while +// iterating over a sharenetworks.List call. +func ExtractShareNetworks(r pagination.Page) ([]ShareNetwork, error) { + var s struct { + ShareNetworks []ShareNetwork `json:"share_networks"` + } + err := (r.(ShareNetworkPage)).ExtractInto(&s) + return s.ShareNetworks, err +} + +// Extract will get the ShareNetwork object out of the commonResult object. +func (r commonResult) Extract() (*ShareNetwork, error) { + var s struct { + ShareNetwork *ShareNetwork `json:"share_network"` + } + err := r.ExtractInto(&s) + return s.ShareNetwork, err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// UpdateResult contains the response body and error from an Update request. 
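+//
+// Example sketch of obtaining an UpdateResult (assumes "client" is an
+// authenticated v2 ServiceClient and the ID is a placeholder):
+//
+//	opts := sharenetworks.UpdateOpts{Description: "new description"}
+//	sn, err := sharenetworks.Update(client, "shareNetworkID", opts).Extract()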
+type UpdateResult struct { + commonResult +} + +// AddSecurityServiceResult contains the response body and error from a security +// service addition request. +type AddSecurityServiceResult struct { + commonResult +} + +// RemoveSecurityServiceResult contains the response body and error from a security +// service removal request. +type RemoveSecurityServiceResult struct { + commonResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/testing/fixtures.go new file mode 100644 index 000000000000..ee5567a63666 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/testing/fixtures.go @@ -0,0 +1,359 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func createReq(name, description, network, subnetwork string) string { + return fmt.Sprintf(`{ + "share_network": { + "name": "%s", + "description": "%s", + "neutron_net_id": "%s", + "neutron_subnet_id": "%s" + } + }`, name, description, network, subnetwork) +} + +func createResp(name, description, network, subnetwork string) string { + return fmt.Sprintf(` + { + "share_network": { + "name": "%s", + "description": "%s", + "segmentation_id": null, + "created_at": "2015-09-07T14:37:00.583656", + "updated_at": null, + "id": "77eb3421-4549-4789-ac39-0d5185d68c29", + "neutron_net_id": "%s", + "neutron_subnet_id": "%s", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "e10a683c20da41248cfd5e1ab3d88c62", + "network_type": null + } + }`, name, description, network, subnetwork) +} + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, createReq("my_network", + "This is my share network", + "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "53482b62-2c84-4a53-b6ab-30d9d9800d06")) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, createResp("my_network", + "This is my share network", + "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "53482b62-2c84-4a53-b6ab-30d9d9800d06")) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/fa158a3d-6d9f-4187-9ca5-abbb82646eb2", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("offset") + + switch marker { + case "": + fmt.Fprintf(w, `{ + "share_networks": [ + { + "name": "net_my1", + "segmentation_id": null, + "created_at": "2015-09-04T14:57:13.000000", + "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", + "updated_at": null, + "id": "32763294-e3d4-456a-998d-60047677c2fb", + "neutron_net_id": 
"998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "descr" + }, + { + "name": "net_my", + "segmentation_id": null, + "created_at": "2015-09-04T14:54:25.000000", + "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", + "updated_at": null, + "id": "713df749-aac0-4a54-af52-10f6c991e80c", + "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "desecr" + }, + { + "name": null, + "segmentation_id": null, + "created_at": "2015-09-04T14:51:41.000000", + "neutron_subnet_id": null, + "updated_at": null, + "id": "fa158a3d-6d9f-4187-9ca5-abbb82646eb2", + "neutron_net_id": null, + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": null + } + ] + }`) + default: + fmt.Fprintf(w, ` + { + "share_networks": [] + }`) + } + }) +} + +func MockFilteredListResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("offset") + switch marker { + case "": + fmt.Fprintf(w, ` + { + "share_networks": [ + { + "name": "net_my1", + "segmentation_id": null, + "created_at": "2015-09-04T14:57:13.000000", + "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", + "updated_at": null, + "id": "32763294-e3d4-456a-998d-60047677c2fb", + "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "descr" + } + ] + }`) + case "1": + fmt.Fprintf(w, ` + { + "share_networks": [ + { + "name": "net_my1", + "segmentation_id": null, + "created_at": "2015-09-04T14:57:13.000000", + "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", + "updated_at": null, + "id": "32763294-e3d4-456a-998d-60047677c2fb", + "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "descr" + } + ] + }`) + case "2": + fmt.Fprintf(w, ` + { + "share_networks": [ + { + "name": "net_my1", + "segmentation_id": null, + "created_at": "2015-09-04T14:57:13.000000", + "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", + "updated_at": null, + "id": "32763294-e3d4-456a-998d-60047677c2fb", + "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "descr" + } + ] + }`) + default: + fmt.Fprintf(w, ` + { + "share_networks": [] + }`) + } + }) +} + +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/7f950b52-6141-4a08-bbb5-bb7ffa3ea5fd", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "share_network": { + "name": "net_my1", + 
"segmentation_id": null, + "created_at": "2015-09-04T14:56:45.000000", + "neutron_subnet_id": "53482b62-2c84-4a53-b6ab-30d9d9800d06", + "updated_at": null, + "id": "7f950b52-6141-4a08-bbb5-bb7ffa3ea5fd", + "neutron_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "ip_version": null, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "descr" + } + }`) + }) +} + +func MockUpdateNeutronResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/713df749-aac0-4a54-af52-10f6c991e80c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "share_network": { + "name": "net_my2", + "segmentation_id": null, + "created_at": "2015-09-04T14:54:25.000000", + "neutron_subnet_id": "new-neutron-subnet-id", + "updated_at": "2015-09-07T08:02:53.512184", + "id": "713df749-aac0-4a54-af52-10f6c991e80c", + "neutron_net_id": "new-neutron-id", + "ip_version": 4, + "nova_net_id": null, + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "new description" + } + } + `) + }) +} + +func MockUpdateNovaResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/713df749-aac0-4a54-af52-10f6c991e80c", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "PUT") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "share_network": { + "name": "net_my2", + "segmentation_id": null, + "created_at": "2015-09-04T14:54:25.000000", + "neutron_subnet_id": null, + "updated_at": "2015-09-07T08:02:53.512184", + "id": "713df749-aac0-4a54-af52-10f6c991e80c", + "neutron_net_id": null, + "ip_version": 4, + "nova_net_id": "new-nova-id", + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": "new description" + } + } + `) + }) +} + +func MockAddSecurityServiceResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/shareNetworkID/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "share_network": { + "name": "net2", + "segmentation_id": null, + "created_at": "2015-09-07T12:31:12.000000", + "neutron_subnet_id": null, + "updated_at": null, + "id": "d8ae6799-2567-4a89-aafb-fa4424350d2b", + "neutron_net_id": null, + "ip_version": 4, + "nova_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": null + } + }`) + }) +} + +func MockRemoveSecurityServiceResponse(t *testing.T) { + th.Mux.HandleFunc("/share-networks/shareNetworkID/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "share_network": { + "name": "net2", + "segmentation_id": null, + "created_at": "2015-09-07T12:31:12.000000", + "neutron_subnet_id": null, + "updated_at": null, + "id": "d8ae6799-2567-4a89-aafb-fa4424350d2b", + "neutron_net_id": null, + "ip_version": null, + "nova_net_id": "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + "cidr": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "network_type": null, + "description": null + } + }`) + }) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/testing/requests_test.go new file mode 100644 index 000000000000..efa4691861de --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/testing/requests_test.go @@ -0,0 +1,280 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks" + "github.com/gophercloud/gophercloud/pagination" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// Verifies that a share network can be created correctly +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := &sharenetworks.CreateOpts{ + Name: "my_network", + Description: "This is my share network", + NeutronNetID: "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + NeutronSubnetID: "53482b62-2c84-4a53-b6ab-30d9d9800d06", + } + + n, err := sharenetworks.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, n.Name, "my_network") + th.AssertEquals(t, n.Description, "This is my share network") + th.AssertEquals(t, n.NeutronNetID, "998b42ee-2cee-4d36-8b95-67b5ca1f2109") + th.AssertEquals(t, n.NeutronSubnetID, "53482b62-2c84-4a53-b6ab-30d9d9800d06") +} + +// Verifies that share network deletion works +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + res := sharenetworks.Delete(client.ServiceClient(), "fa158a3d-6d9f-4187-9ca5-abbb82646eb2") + th.AssertNoErr(t, res.Err) +} + +// Verifies that share networks can be listed correctly +func TestListDetail(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := sharenetworks.ListDetail(client.ServiceClient(), &sharenetworks.ListOpts{}).AllPages() + + th.AssertNoErr(t, err) + actual, err := sharenetworks.ExtractShareNetworks(allPages) + th.AssertNoErr(t, err) + + var nilTime time.Time + expected := []sharenetworks.ShareNetwork{ + { + ID: "32763294-e3d4-456a-998d-60047677c2fb", + Name: "net_my1", + CreatedAt: time.Date(2015, 9, 4, 14, 57, 13, 0, time.UTC), + Description: "descr", + NetworkType: "", + CIDR: "", + NovaNetID: "", + NeutronNetID: "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + NeutronSubnetID: "53482b62-2c84-4a53-b6ab-30d9d9800d06", + IPVersion: 0, + SegmentationID: 0, + UpdatedAt: nilTime, + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + }, + { + ID: "713df749-aac0-4a54-af52-10f6c991e80c", + Name: "net_my", + CreatedAt: time.Date(2015, 9, 4, 14, 54, 25, 0, time.UTC), + Description: "desecr", + NetworkType: "", + CIDR: "", + NovaNetID: "", + NeutronNetID: "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + NeutronSubnetID: "53482b62-2c84-4a53-b6ab-30d9d9800d06", + IPVersion: 0, + SegmentationID: 0, + UpdatedAt: nilTime, + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + }, + { + ID: "fa158a3d-6d9f-4187-9ca5-abbb82646eb2", + Name: "", + CreatedAt: time.Date(2015, 9, 4, 14, 51, 41, 0, time.UTC), + Description: "", + NetworkType: "", + CIDR: "", + NovaNetID: "", + NeutronNetID: "", + NeutronSubnetID: "", + IPVersion: 0, + SegmentationID: 0, + UpdatedAt: nilTime, + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + }, + } + + th.CheckDeepEquals(t, expected, actual) +} + +// Verifies that share networks list can be called with query 
parameters +func TestPaginatedListDetail(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockFilteredListResponse(t) + + options := &sharenetworks.ListOpts{ + Offset: 0, + Limit: 1, + } + + count := 0 + + err := sharenetworks.ListDetail(client.ServiceClient(), options).EachPage(func(page pagination.Page) (bool, error) { + count++ + _, err := sharenetworks.ExtractShareNetworks(page) + if err != nil { + t.Errorf("Failed to extract share networks: %v", err) + return false, err + } + + return true, nil + }) + th.AssertNoErr(t, err) + + th.AssertEquals(t, count, 3) +} + +// Verifies that it is possible to get a share network +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + var nilTime time.Time + expected := sharenetworks.ShareNetwork{ + ID: "7f950b52-6141-4a08-bbb5-bb7ffa3ea5fd", + Name: "net_my1", + CreatedAt: time.Date(2015, 9, 4, 14, 56, 45, 0, time.UTC), + Description: "descr", + NetworkType: "", + CIDR: "", + NovaNetID: "", + NeutronNetID: "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + NeutronSubnetID: "53482b62-2c84-4a53-b6ab-30d9d9800d06", + IPVersion: 0, + SegmentationID: 0, + UpdatedAt: nilTime, + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + } + + n, err := sharenetworks.Get(client.ServiceClient(), "7f950b52-6141-4a08-bbb5-bb7ffa3ea5fd").Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, &expected, n) +} + +// Verifies that it is possible to update a share network using neutron network +func TestUpdateNeutron(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateNeutronResponse(t) + + expected := sharenetworks.ShareNetwork{ + ID: "713df749-aac0-4a54-af52-10f6c991e80c", + Name: "net_my2", + CreatedAt: time.Date(2015, 9, 4, 14, 54, 25, 0, time.UTC), + Description: "new description", + NetworkType: "", + CIDR: "", + NovaNetID: "", + NeutronNetID: "new-neutron-id", + NeutronSubnetID: "new-neutron-subnet-id", + IPVersion: 4, + SegmentationID: 0, + UpdatedAt: time.Date(2015, 9, 7, 8, 2, 53, 512184000, time.UTC), + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + } + + options := sharenetworks.UpdateOpts{ + Name: "net_my2", + Description: "new description", + NeutronNetID: "new-neutron-id", + NeutronSubnetID: "new-neutron-subnet-id", + } + + v, err := sharenetworks.Update(client.ServiceClient(), "713df749-aac0-4a54-af52-10f6c991e80c", options).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, v) +} + +// Verifies that it is possible to update a share network using nova network +func TestUpdateNova(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockUpdateNovaResponse(t) + + expected := sharenetworks.ShareNetwork{ + ID: "713df749-aac0-4a54-af52-10f6c991e80c", + Name: "net_my2", + CreatedAt: time.Date(2015, 9, 4, 14, 54, 25, 0, time.UTC), + Description: "new description", + NetworkType: "", + CIDR: "", + NovaNetID: "new-nova-id", + NeutronNetID: "", + NeutronSubnetID: "", + IPVersion: 4, + SegmentationID: 0, + UpdatedAt: time.Date(2015, 9, 7, 8, 2, 53, 512184000, time.UTC), + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + } + + options := sharenetworks.UpdateOpts{ + Name: "net_my2", + Description: "new description", + NovaNetID: "new-nova-id", + } + + v, err := sharenetworks.Update(client.ServiceClient(), "713df749-aac0-4a54-af52-10f6c991e80c", options).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, v) +} + +// Verifies that it is possible to add a security service to a share network +func TestAddSecurityService(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + + MockAddSecurityServiceResponse(t) + + var nilTime time.Time + expected := sharenetworks.ShareNetwork{ + ID: "d8ae6799-2567-4a89-aafb-fa4424350d2b", + Name: "net2", + CreatedAt: time.Date(2015, 9, 7, 12, 31, 12, 0, time.UTC), + Description: "", + NetworkType: "", + CIDR: "", + NovaNetID: "998b42ee-2cee-4d36-8b95-67b5ca1f2109", + NeutronNetID: "", + NeutronSubnetID: "", + IPVersion: 4, + SegmentationID: 0, + UpdatedAt: nilTime, + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + } + + options := sharenetworks.AddSecurityServiceOpts{SecurityServiceID: "securityServiceID"} + s, err := sharenetworks.AddSecurityService(client.ServiceClient(), "shareNetworkID", options).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, s) +} + +// Verifies that it is possible to remove a security service from a share network +func TestRemoveSecurityService(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockRemoveSecurityServiceResponse(t) + + options := sharenetworks.RemoveSecurityServiceOpts{SecurityServiceID: "securityServiceID"} + _, err := sharenetworks.RemoveSecurityService(client.ServiceClient(), "shareNetworkID", options).Extract() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/urls.go new file mode 100644 index 000000000000..667bd2a526d7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharenetworks/urls.go @@ -0,0 +1,31 @@ +package sharenetworks + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("share-networks") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("share-networks", id) +} + +func listDetailURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("share-networks", "detail") +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func updateURL(c *gophercloud.ServiceClient, id string) string { + return deleteURL(c, id) +} + +func addSecurityServiceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("share-networks", id, "action") +} + +func removeSecurityServiceURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("share-networks", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go new file mode 100644 index 000000000000..529d38503b6a --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/requests.go @@ -0,0 +1,343 @@ +package shares + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToShareCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains the options for create a Share. This object is +// passed to shares.Create(). 
For more information about these parameters, +// please refer to the Share object, or the shared file systems API v2 +// documentation +type CreateOpts struct { + // Defines the share protocol to use + ShareProto string `json:"share_proto" required:"true"` + // Size in GB + Size int `json:"size" required:"true"` + // Defines the share name + Name string `json:"name,omitempty"` + // Share description + Description string `json:"description,omitempty"` + // DisplayName is equivalent to Name. The API supports using both + // This is an inherited attribute from the block storage API + DisplayName string `json:"display_name,omitempty"` + // DisplayDescription is equivalent to Description. The API supports using both + // This is an inherited attribute from the block storage API + DisplayDescription string `json:"display_description,omitempty"` + // ShareType defines the sharetype. If omitted, a default share type is used + ShareType string `json:"share_type,omitempty"` + // VolumeType is deprecated but supported. Either ShareType or VolumeType can be used + VolumeType string `json:"volume_type,omitempty"` + // The UUID from which to create a share + SnapshotID string `json:"snapshot_id,omitempty"` + // Determines whether or not the share is public + IsPublic *bool `json:"is_public,omitempty"` + // Key value pairs of user defined metadata + Metadata map[string]string `json:"metadata,omitempty"` + // The UUID of the share network to which the share belongs to + ShareNetworkID string `json:"share_network_id,omitempty"` + // The UUID of the consistency group to which the share belongs to + ConsistencyGroupID string `json:"consistency_group_id,omitempty"` + // The availability zone of the share + AvailabilityZone string `json:"availability_zone,omitempty"` +} + +// ToShareCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToShareCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share") +} + +// Create will create a new Share based on the values in CreateOpts. To extract +// the Share object from the response, call the Extract method on the +// CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToShareCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 201}, + }) + return +} + +// ListOpts holds options for listing Shares. It is passed to the +// shares.List function. +type ListOpts struct { + // (Admin only). Defines whether to list the requested resources for all projects. + AllTenants bool `q:"all_tenants"` + // The share name. + Name string `q:"name"` + // Filters by a share status. + Status string `q:"status"` + // The UUID of the share server. + ShareServerID string `q:"share_server_id"` + // One or more metadata key and value pairs as a dictionary of strings. + Metadata map[string]string `q:"metadata"` + // The extra specifications for the share type. + ExtraSpecs map[string]string `q:"extra_specs"` + // The UUID of the share type. + ShareTypeID string `q:"share_type_id"` + // The maximum number of shares to return. + Limit int `q:"limit"` + // The offset to define start point of share or share group listing. + Offset int `q:"offset"` + // The key to sort a list of shares. + SortKey string `q:"sort_key"` + // The direction to sort a list of shares. 
+ SortDir string `q:"sort_dir"` + // The UUID of the share’s base snapshot to filter the request based on. + SnapshotID string `q:"snapshot_id"` + // The share host name. + Host string `q:"host"` + // The share network ID. + ShareNetworkID string `q:"share_network_id"` + // The UUID of the project in which the share was created. Useful with all_tenants parameter. + ProjectID string `q:"project_id"` + // The level of visibility for the share. + IsPublic *bool `q:"is_public"` + // The UUID of a share group to filter resource. + ShareGroupID string `q:"share_group_id"` + // The export location UUID that can be used to filter shares or share instances. + ExportLocationID string `q:"export_location_id"` + // The export location path that can be used to filter shares or share instances. + ExportLocationPath string `q:"export_location_path"` + // The name pattern that can be used to filter shares, share snapshots, share networks or share groups. + NamePattern string `q:"name~"` + // The description pattern that can be used to filter shares, share snapshots, share networks or share groups. + DescriptionPattern string `q:"description~"` + // Whether to show count in API response or not, default is False. + WithCount bool `q:"with_count"` + // DisplayName is equivalent to Name. The API supports using both + // This is an inherited attribute from the block storage API + DisplayName string `q:"display_name"` + // Equivalent to NamePattern. + DisplayNamePattern string `q:"display_name~"` + // VolumeTypeID is deprecated but supported. Either ShareTypeID or VolumeTypeID can be used + VolumeTypeID string `q:"volume_type_id"` + // The UUID of the share group snapshot. + ShareGroupSnapshotID string `q:"share_group_snapshot_id"` + // DisplayDescription is equivalent to Description. The API supports using both + // This is an inherited attribute from the block storage API + DisplayDescription string `q:"display_description"` + // Equivalent to DescriptionPattern + DisplayDescriptionPattern string `q:"display_description~"` +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToShareListQuery() (string, error) +} + +// ToShareListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToShareListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// ListDetail returns []Share optionally limited by the conditions provided in ListOpts. +func ListDetail(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listDetailURL(client) + if opts != nil { + query, err := opts.ToShareListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + p := SharePage{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + }) +} + +// Delete will delete an existing Share with the given UUID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// Get will get a single share with given UUID +func Get(client *gophercloud.ServiceClient, id string) (r GetResult) { + _, r.Err = client.Get(getURL(client, id), &r.Body, nil) + return +} + +// GetExportLocations will get shareID's export locations. +// Client must have Microversion set; minimum supported microversion for GetExportLocations is 2.14. 
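+//
+// Example sketch (assumes "client" is an authenticated v2 ServiceClient, shareID is a
+// placeholder UUID, and the version is set via the ServiceClient's Microversion field):
+//
+//	client.Microversion = "2.14"
+//	locations, err := shares.GetExportLocations(client, shareID).Extract()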
+func GetExportLocations(client *gophercloud.ServiceClient, id string) (r GetExportLocationsResult) { + _, r.Err = client.Get(getExportLocationsURL(client, id), &r.Body, nil) + return +} + +// GrantAccessOptsBuilder allows extensions to add additional parameters to the +// GrantAccess request. +type GrantAccessOptsBuilder interface { + ToGrantAccessMap() (map[string]interface{}, error) +} + +// GrantAccessOpts contains the options for creation of an GrantAccess request. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Grant Access documentation +type GrantAccessOpts struct { + // The access rule type that can be "ip", "cert" or "user". + AccessType string `json:"access_type"` + // The value that defines the access that can be a valid format of IP, cert or user. + AccessTo string `json:"access_to"` + // The access level to the share is either "rw" or "ro". + AccessLevel string `json:"access_level"` +} + +// ToGrantAccessMap assembles a request body based on the contents of a +// GrantAccessOpts. +func (opts GrantAccessOpts) ToGrantAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "allow_access") +} + +// GrantAccess will grant access to a Share based on the values in GrantAccessOpts. To extract +// the GrantAccess object from the response, call the Extract method on the GrantAccessResult. +// Client must have Microversion set; minimum supported microversion for GrantAccess is 2.7. +func GrantAccess(client *gophercloud.ServiceClient, id string, opts GrantAccessOptsBuilder) (r GrantAccessResult) { + b, err := opts.ToGrantAccessMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(grantAccessURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// RevokeAccessOptsBuilder allows extensions to add additional parameters to the +// RevokeAccess request. +type RevokeAccessOptsBuilder interface { + ToRevokeAccessMap() (map[string]interface{}, error) +} + +// RevokeAccessOpts contains the options for creation of a RevokeAccess request. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Revoke Access documentation +type RevokeAccessOpts struct { + AccessID string `json:"access_id"` +} + +// ToRevokeAccessMap assembles a request body based on the contents of a +// RevokeAccessOpts. +func (opts RevokeAccessOpts) ToRevokeAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "deny_access") +} + +// RevokeAccess will revoke an existing access to a Share based on the values in RevokeAccessOpts. +// RevokeAccessResult contains only the error. To extract it, call the ExtractErr method on +// the RevokeAccessResult. Client must have Microversion set; minimum supported microversion +// for RevokeAccess is 2.7. +func RevokeAccess(client *gophercloud.ServiceClient, id string, opts RevokeAccessOptsBuilder) (r RevokeAccessResult) { + b, err := opts.ToRevokeAccessMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(revokeAccessURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + + return +} + +// ListAccessRights lists all access rules assigned to a Share based on its id. To extract +// the AccessRight slice from the response, call the Extract method on the ListAccessRightsResult. +// Client must have Microversion set; minimum supported microversion for ListAccessRights is 2.7. 
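+//
+// Example sketch (assumes "client" has Microversion "2.7" or later set and
+// shareID is a placeholder UUID):
+//
+//	client.Microversion = "2.7"
+//	rules, err := shares.ListAccessRights(client, shareID).Extract()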
+func ListAccessRights(client *gophercloud.ServiceClient, id string) (r ListAccessRightsResult) { + requestBody := map[string]interface{}{"access_list": nil} + _, r.Err = client.Post(listAccessRightsURL(client, id), requestBody, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200}, + }) + return +} + +// ExtendOptsBuilder allows extensions to add additional parameters to the +// Extend request. +type ExtendOptsBuilder interface { + ToShareExtendMap() (map[string]interface{}, error) +} + +// ExtendOpts contains options for extending a Share. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Extend share documentation +type ExtendOpts struct { + // New size in GBs. + NewSize int `json:"new_size"` +} + +// ToShareExtendMap assembles a request body based on the contents of a +// ExtendOpts. +func (opts ExtendOpts) ToShareExtendMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "extend") +} + +// Extend will extend the capacity of an existing share. ExtendResult contains only the error. +// To extract it, call the ExtractErr method on the ExtendResult. +// Client must have Microversion set; minimum supported microversion for Extend is 2.7. +func Extend(client *gophercloud.ServiceClient, id string, opts ExtendOptsBuilder) (r ExtendResult) { + b, err := opts.ToShareExtendMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(extendURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return +} + +// ShrinkOptsBuilder allows extensions to add additional parameters to the +// Shrink request. +type ShrinkOptsBuilder interface { + ToShareShrinkMap() (map[string]interface{}, error) +} + +// ShrinkOpts contains options for shrinking a Share. +// For more information about these parameters, please, refer to the shared file systems API v2, +// Share Actions, Shrink share documentation +type ShrinkOpts struct { + // New size in GBs. + NewSize int `json:"new_size"` +} + +// ToShareShrinkMap assembles a request body based on the contents of a +// ShrinkOpts. +func (opts ShrinkOpts) ToShareShrinkMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "shrink") +} + +// Shrink will shrink the capacity of an existing share. ShrinkResult contains only the error. +// To extract it, call the ExtractErr method on the ShrinkResult. +// Client must have Microversion set; minimum supported microversion for Shrink is 2.7. 
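+//
+// Example sketch (assumes "client" has Microversion "2.7" or later set and
+// shareID is a placeholder; the 1 GB target is purely illustrative):
+//
+//	err := shares.Shrink(client, shareID, shares.ShrinkOpts{NewSize: 1}).ExtractErr()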
+func Shrink(client *gophercloud.ServiceClient, id string, opts ShrinkOptsBuilder) (r ShrinkResult) { + b, err := opts.ToShareShrinkMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(shrinkURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{202}, + }) + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go new file mode 100644 index 000000000000..df26c66969bd --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/results.go @@ -0,0 +1,317 @@ +package shares + +import ( + "encoding/json" + "net/url" + "strconv" + "time" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +const ( + invalidMarker = "-1" +) + +// Share contains all information associated with an OpenStack Share +type Share struct { + // The availability zone of the share + AvailabilityZone string `json:"availability_zone"` + // A description of the share + Description string `json:"description,omitempty"` + // DisplayDescription is inherited from BlockStorage API. + // Both Description and DisplayDescription can be used + DisplayDescription string `json:"display_description,omitempty"` + // DisplayName is inherited from BlockStorage API + // Both DisplayName and Name can be used + DisplayName string `json:"display_name,omitempty"` + // Indicates whether a share has replicas or not. + HasReplicas bool `json:"has_replicas"` + // The host name of the share + Host string `json:"host"` + // The UUID of the share + ID string `json:"id"` + // Indicates the visibility of the share + IsPublic bool `json:"is_public,omitempty"` + // Share links for pagination + Links []map[string]string `json:"links"` + // Key, value -pairs of custom metadata + Metadata map[string]string `json:"metadata,omitempty"` + // The name of the share + Name string `json:"name,omitempty"` + // The UUID of the project to which this share belongs to + ProjectID string `json:"project_id"` + // The share replication type + ReplicationType string `json:"replication_type,omitempty"` + // The UUID of the share network + ShareNetworkID string `json:"share_network_id"` + // The shared file system protocol + ShareProto string `json:"share_proto"` + // The UUID of the share server + ShareServerID string `json:"share_server_id"` + // The UUID of the share type. + ShareType string `json:"share_type"` + // The name of the share type. 
+ ShareTypeName string `json:"share_type_name"` + // Size of the share in GB + Size int `json:"size"` + // UUID of the snapshot from which to create the share + SnapshotID string `json:"snapshot_id"` + // The share status + Status string `json:"status"` + // The task state, used for share migration + TaskState string `json:"task_state"` + // The type of the volume + VolumeType string `json:"volume_type,omitempty"` + // The UUID of the consistency group this share belongs to + ConsistencyGroupID string `json:"consistency_group_id"` + // Used for filtering backends which either support or do not support share snapshots + SnapshotSupport bool `json:"snapshot_support"` + SourceCgsnapshotMemberID string `json:"source_cgsnapshot_member_id"` + // Timestamp when the share was created + CreatedAt time.Time `json:"-"` +} + +func (r *Share) UnmarshalJSON(b []byte) error { + type tmp Share + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339MilliNoZ `json:"created_at"` + } + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + *r = Share(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + + return nil +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the Share object from the commonResult +func (r commonResult) Extract() (*Share, error) { + var s struct { + Share *Share `json:"share"` + } + err := r.ExtractInto(&s) + return s.Share, err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// SharePage is a pagination.pager that is returned from a call to the List function. +type SharePage struct { + pagination.MarkerPageBase +} + +// NextPageURL generates the URL for the page of results after this one. +func (r SharePage) NextPageURL() (string, error) { + currentURL := r.URL + mark, err := r.Owner.LastMarker() + if err != nil { + return "", err + } + if mark == invalidMarker { + return "", nil + } + + q := currentURL.Query() + q.Set("offset", mark) + currentURL.RawQuery = q.Encode() + return currentURL.String(), nil +} + +// LastMarker returns the last offset in a ListResult. +func (r SharePage) LastMarker() (string, error) { + shares, err := ExtractShares(r) + if err != nil { + return invalidMarker, err + } + if len(shares) == 0 { + return invalidMarker, nil + } + + u, err := url.Parse(r.URL.String()) + if err != nil { + return invalidMarker, err + } + queryParams := u.Query() + offset := queryParams.Get("offset") + limit := queryParams.Get("limit") + + // Limit is not present, only one page required + if limit == "" { + return invalidMarker, nil + } + + iOffset := 0 + if offset != "" { + iOffset, err = strconv.Atoi(offset) + if err != nil { + return invalidMarker, err + } + } + iLimit, err := strconv.Atoi(limit) + if err != nil { + return invalidMarker, err + } + iOffset = iOffset + iLimit + offset = strconv.Itoa(iOffset) + + return offset, nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (r SharePage) IsEmpty() (bool, error) { + shares, err := ExtractShares(r) + return len(shares) == 0, err +} + +// ExtractShares extracts and returns a Share slice. It is used while +// iterating over a shares.List call. +func ExtractShares(r pagination.Page) ([]Share, error) { + var s struct { + Shares []Share `json:"shares"` + } + + err := (r.(SharePage)).ExtractInto(&s) + + return s.Shares, err +} + +// DeleteResult contains the response body and error from a Delete request. 
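+//
+// Example sketch of obtaining a DeleteResult (assumes "client" is an
+// authenticated v2 ServiceClient and shareID is a placeholder UUID):
+//
+//	err := shares.Delete(client, shareID).ExtractErr()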
+type DeleteResult struct { + gophercloud.ErrResult +} + +// GetResult contains the response body and error from a Get request. +type GetResult struct { + commonResult +} + +// IDFromName is a convenience function that returns a share's ID given its name. +func IDFromName(client *gophercloud.ServiceClient, name string) (string, error) { + r, err := ListDetail(client, &ListOpts{Name: name}).AllPages() + if err != nil { + return "", err + } + + ss, err := ExtractShares(r) + if err != nil { + return "", err + } + + switch len(ss) { + case 0: + return "", gophercloud.ErrResourceNotFound{Name: name, ResourceType: "share"} + case 1: + return ss[0].ID, nil + default: + return "", gophercloud.ErrMultipleResourcesFound{Name: name, Count: len(ss), ResourceType: "share"} + } +} + +// GetExportLocationsResult contains the result body and error from an +// GetExportLocations request. +type GetExportLocationsResult struct { + gophercloud.Result +} + +// ExportLocation contains all information associated with a share export location +type ExportLocation struct { + // The export location path that should be used for mount operation. + Path string `json:"path"` + // The UUID of the share instance that this export location belongs to. + ShareInstanceID string `json:"share_instance_id"` + // Defines purpose of an export location. + // If set to true, then it is expected to be used for service needs + // and by administrators only. + // If it is set to false, then this export location can be used by end users. + IsAdminOnly bool `json:"is_admin_only"` + // The share export location UUID. + ID string `json:"id"` + // Drivers may use this field to identify which export locations are + // most efficient and should be used preferentially by clients. + // By default it is set to false value. New in version 2.14 + Preferred bool `json:"preferred"` +} + +// Extract will get the Export Locations from the commonResult +func (r GetExportLocationsResult) Extract() ([]ExportLocation, error) { + var s struct { + ExportLocations []ExportLocation `json:"export_locations"` + } + err := r.ExtractInto(&s) + return s.ExportLocations, err +} + +// AccessRight contains all information associated with an OpenStack share +// Grant Access Response +type AccessRight struct { + // The UUID of the share to which you are granted or denied access. + ShareID string `json:"share_id"` + // The access rule type that can be "ip", "cert" or "user". + AccessType string `json:"access_type,omitempty"` + // The value that defines the access that can be a valid format of IP, cert or user. + AccessTo string `json:"access_to,omitempty"` + // The access credential of the entity granted share access. + AccessKey string `json:"access_key,omitempty"` + // The access level to the share is either "rw" or "ro". + AccessLevel string `json:"access_level,omitempty"` + // The state of the access rule + State string `json:"state,omitempty"` + // The access rule ID. + ID string `json:"id"` +} + +// Extract will get the GrantAccess object from the commonResult +func (r GrantAccessResult) Extract() (*AccessRight, error) { + var s struct { + AccessRight *AccessRight `json:"access"` + } + err := r.ExtractInto(&s) + return s.AccessRight, err +} + +// GrantAccessResult contains the result body and error from an GrantAccess request. +type GrantAccessResult struct { + gophercloud.Result +} + +// RevokeAccessResult contains the response body and error from a Revoke access request. 
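+//
+// Example sketch (assumes "client" has Microversion "2.7" or later set, and
+// shareID and accessID are placeholders):
+//
+//	opts := shares.RevokeAccessOpts{AccessID: accessID}
+//	err := shares.RevokeAccess(client, shareID, opts).ExtractErr()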
+type RevokeAccessResult struct { + gophercloud.ErrResult +} + +// Extract will get a slice of AccessRight objects from the commonResult +func (r ListAccessRightsResult) Extract() ([]AccessRight, error) { + var s struct { + AccessRights []AccessRight `json:"access_list"` + } + err := r.ExtractInto(&s) + return s.AccessRights, err +} + +// ListAccessRightsResult contains the result body and error from a ListAccessRights request. +type ListAccessRightsResult struct { + gophercloud.Result +} + +// ExtendResult contains the response body and error from an Extend request. +type ExtendResult struct { + gophercloud.ErrResult +} + +// ShrinkResult contains the response body and error from a Shrink request. +type ShrinkResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/testing/fixtures.go new file mode 100644 index 000000000000..0e60d9142e83 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/testing/fixtures.go @@ -0,0 +1,356 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +const ( + shareEndpoint = "/shares" + shareID = "011d21e2-fbc3-4e4a-9993-9ea223f73264" +) + +var createRequest = `{ + "share": { + "name": "my_test_share", + "size": 1, + "share_proto": "NFS" + } + }` + +var createResponse = `{ + "share": { + "name": "my_test_share", + "share_proto": "NFS", + "size": 1, + "status": null, + "share_server_id": null, + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", + "share_type_name": "default", + "availability_zone": null, + "created_at": "2015-09-18T10:25:24.533287", + "export_location": null, + "links": [ + { + "href": "http://172.18.198.54:8786/v1/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "self" + }, + { + "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "bookmark" + } + ], + "share_network_id": null, + "export_locations": [], + "host": null, + "access_rules_status": "active", + "has_replicas": false, + "replication_type": null, + "task_state": null, + "snapshot_support": true, + "consistency_group_id": "9397c191-8427-4661-a2e8-b23820dc01d4", + "source_cgsnapshot_member_id": null, + "volume_type": "default", + "snapshot_id": null, + "is_public": true, + "metadata": { + "project": "my_app", + "aim": "doc" + }, + "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", + "description": "My custom share London" + } + }` + +// MockCreateResponse creates a mock response +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, createRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, createResponse) + }) +} + +// MockDeleteResponse creates a mock delete response +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + 
th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +var getResponse = `{ + "share": { + "links": [ + { + "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "self" + }, + { + "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "bookmark" + } + ], + "availability_zone": "nova", + "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", + "share_server_id": "e268f4aa-d571-43dd-9ab3-f49ad06ffaef", + "snapshot_id": null, + "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", + "size": 1, + "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", + "share_type_name": "default", + "consistency_group_id": "9397c191-8427-4661-a2e8-b23820dc01d4", + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "metadata": { + "project": "my_app", + "aim": "doc" + }, + "status": "available", + "description": "My custom share London", + "host": "manila2@generic1#GENERIC1", + "has_replicas": false, + "replication_type": null, + "task_state": null, + "is_public": true, + "snapshot_support": true, + "name": "my_test_share", + "created_at": "2015-09-18T10:25:24.000000", + "share_proto": "NFS", + "volume_type": "default", + "source_cgsnapshot_member_id": null + } +}` + +// MockGetResponse creates a mock get response +func MockGetResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, getResponse) + }) +} + +var listDetailResponse = `{ + "shares": [ + { + "links": [ + { + "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "self" + }, + { + "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "bookmark" + } + ], + "availability_zone": "nova", + "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", + "share_server_id": "e268f4aa-d571-43dd-9ab3-f49ad06ffaef", + "snapshot_id": null, + "id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", + "size": 1, + "share_type": "25747776-08e5-494f-ab40-a64b9d20d8f7", + "share_type_name": "default", + "consistency_group_id": "9397c191-8427-4661-a2e8-b23820dc01d4", + "project_id": "16e1ab15c35a457e9c2b2aa189f544e1", + "metadata": { + "project": "my_app", + "aim": "doc" + }, + "status": "available", + "description": "My custom share London", + "host": "manila2@generic1#GENERIC1", + "has_replicas": false, + "replication_type": null, + "task_state": null, + "is_public": true, + "snapshot_support": true, + "name": "my_test_share", + "created_at": "2015-09-18T10:25:24.000000", + "share_proto": "NFS", + "volume_type": "default", + "source_cgsnapshot_member_id": null + } + ] + }` + +var listDetailEmptyResponse = `{"shares": []}` + +// MockListDetailResponse creates a mock detailed-list response +func MockListDetailResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/detail", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + r.ParseForm() + marker := r.Form.Get("offset") + + switch marker { + case "": + fmt.Fprint(w, listDetailResponse) + default: + fmt.Fprint(w, listDetailEmptyResponse) + } + }) +} + +var 
getExportLocationsResponse = `{ + "export_locations": [ + { + "path": "127.0.0.1:/var/lib/manila/mnt/share-9a922036-ad26-4d27-b955-7a1e285fa74d", + "share_instance_id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", + "is_admin_only": false, + "id": "80ed63fc-83bc-4afc-b881-da4a345ac83d", + "preferred": false + } + ] +}` + +// MockGetExportLocationsResponse creates a mock get export locations response +func MockGetExportLocationsResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID+"/export_locations", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, getExportLocationsResponse) + }) +} + +var grantAccessRequest = `{ + "allow_access": { + "access_type": "ip", + "access_to": "0.0.0.0/0", + "access_level": "rw" + } + }` + +var grantAccessResponse = `{ + "access": { + "share_id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", + "access_type": "ip", + "access_to": "0.0.0.0/0", + "access_key": "", + "access_level": "rw", + "state": "new", + "id": "a2f226a5-cee8-430b-8a03-78a59bd84ee8" + } +}` + +// MockGrantAccessResponse creates a mock grant access response +func MockGrantAccessResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, grantAccessRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, grantAccessResponse) + }) +} + +var revokeAccessRequest = `{ + "deny_access": { + "access_id": "a2f226a5-cee8-430b-8a03-78a59bd84ee8" + } +}` + +// MockRevokeAccessResponse creates a mock revoke access response +func MockRevokeAccessResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, revokeAccessRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +var listAccessRightsRequest = `{ + "access_list": null + }` + +var listAccessRightsResponse = `{ + "access_list": [ + { + "share_id": "011d21e2-fbc3-4e4a-9993-9ea223f73264", + "access_type": "ip", + "access_to": "0.0.0.0/0", + "access_key": "", + "access_level": "rw", + "state": "new", + "id": "a2f226a5-cee8-430b-8a03-78a59bd84ee8" + } + ] + }` + +// MockListAccessRightsResponse creates a mock list access response +func MockListAccessRightsResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, listAccessRightsRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, listAccessRightsResponse) + }) +} + +var extendRequest = `{ + "extend": { + "new_size": 2 + } + }` + +// MockExtendResponse creates a mock extend share response +func MockExtendResponse(t 
*testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, extendRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} + +var shrinkRequest = `{ + "shrink": { + "new_size": 1 + } + }` + +// MockShrinkResponse creates a mock shrink share response +func MockShrinkResponse(t *testing.T) { + th.Mux.HandleFunc(shareEndpoint+"/"+shareID+"/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, shrinkRequest) + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/testing/request_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/testing/request_test.go new file mode 100644 index 000000000000..7513c3345d26 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/testing/request_test.go @@ -0,0 +1,263 @@ +package testing + +import ( + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + options := &shares.CreateOpts{Size: 1, Name: "my_test_share", ShareProto: "NFS"} + n, err := shares.Create(client.ServiceClient(), options).Extract() + + th.AssertNoErr(t, err) + th.AssertEquals(t, n.Name, "my_test_share") + th.AssertEquals(t, n.Size, 1) + th.AssertEquals(t, n.ShareProto, "NFS") +} + +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + + result := shares.Delete(client.ServiceClient(), shareID) + th.AssertNoErr(t, result.Err) +} + +func TestGet(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetResponse(t) + + s, err := shares.Get(client.ServiceClient(), shareID).Extract() + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, s, &shares.Share{ + AvailabilityZone: "nova", + ShareNetworkID: "713df749-aac0-4a54-af52-10f6c991e80c", + ShareServerID: "e268f4aa-d571-43dd-9ab3-f49ad06ffaef", + SnapshotID: "", + ID: shareID, + Size: 1, + ShareType: "25747776-08e5-494f-ab40-a64b9d20d8f7", + ShareTypeName: "default", + ConsistencyGroupID: "9397c191-8427-4661-a2e8-b23820dc01d4", + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Metadata: map[string]string{ + "project": "my_app", + "aim": "doc", + }, + Status: "available", + Description: "My custom share London", + Host: "manila2@generic1#GENERIC1", + HasReplicas: false, + ReplicationType: "", + TaskState: "", + SnapshotSupport: true, + Name: "my_test_share", + CreatedAt: time.Date(2015, time.September, 18, 10, 25, 24, 0, time.UTC), + ShareProto: "NFS", + VolumeType: "default", + SourceCgsnapshotMemberID: "", + IsPublic: true, + Links: []map[string]string{ + { + "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + 
"rel": "self", + }, + { + "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "bookmark", + }, + }, + }) +} + +func TestListDetail(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListDetailResponse(t) + + allPages, err := shares.ListDetail(client.ServiceClient(), &shares.ListOpts{}).AllPages() + + th.AssertNoErr(t, err) + + actual, err := shares.ExtractShares(allPages) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, actual, []shares.Share{ + shares.Share{ + AvailabilityZone: "nova", + ShareNetworkID: "713df749-aac0-4a54-af52-10f6c991e80c", + ShareServerID: "e268f4aa-d571-43dd-9ab3-f49ad06ffaef", + SnapshotID: "", + ID: shareID, + Size: 1, + ShareType: "25747776-08e5-494f-ab40-a64b9d20d8f7", + ShareTypeName: "default", + ConsistencyGroupID: "9397c191-8427-4661-a2e8-b23820dc01d4", + ProjectID: "16e1ab15c35a457e9c2b2aa189f544e1", + Metadata: map[string]string{ + "project": "my_app", + "aim": "doc", + }, + Status: "available", + Description: "My custom share London", + Host: "manila2@generic1#GENERIC1", + HasReplicas: false, + ReplicationType: "", + TaskState: "", + SnapshotSupport: true, + Name: "my_test_share", + CreatedAt: time.Date(2015, time.September, 18, 10, 25, 24, 0, time.UTC), + ShareProto: "NFS", + VolumeType: "default", + SourceCgsnapshotMemberID: "", + IsPublic: true, + Links: []map[string]string{ + { + "href": "http://172.18.198.54:8786/v2/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "self", + }, + { + "href": "http://172.18.198.54:8786/16e1ab15c35a457e9c2b2aa189f544e1/shares/011d21e2-fbc3-4e4a-9993-9ea223f73264", + "rel": "bookmark", + }, + }, + }, + }) +} + +func TestGetExportLocationsSuccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetExportLocationsResponse(t) + + c := client.ServiceClient() + // Client c must have Microversion set; minimum supported microversion for Get Export Locations is 2.14 + c.Microversion = "2.14" + + s, err := shares.GetExportLocations(c, shareID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, s, []shares.ExportLocation{ + { + Path: "127.0.0.1:/var/lib/manila/mnt/share-9a922036-ad26-4d27-b955-7a1e285fa74d", + ShareInstanceID: "011d21e2-fbc3-4e4a-9993-9ea223f73264", + IsAdminOnly: false, + ID: "80ed63fc-83bc-4afc-b881-da4a345ac83d", + Preferred: false, + }, + }) +} + +func TestGrantAcessSuccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGrantAccessResponse(t) + + c := client.ServiceClient() + // Client c must have Microversion set; minimum supported microversion for Grant Access is 2.7 + c.Microversion = "2.7" + + var grantAccessReq shares.GrantAccessOpts + grantAccessReq.AccessType = "ip" + grantAccessReq.AccessTo = "0.0.0.0/0" + grantAccessReq.AccessLevel = "rw" + + s, err := shares.GrantAccess(c, shareID, grantAccessReq).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, s, &shares.AccessRight{ + ShareID: "011d21e2-fbc3-4e4a-9993-9ea223f73264", + AccessType: "ip", + AccessTo: "0.0.0.0/0", + AccessKey: "", + AccessLevel: "rw", + State: "new", + ID: "a2f226a5-cee8-430b-8a03-78a59bd84ee8", + }) +} + +func TestRevokeAccessSuccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockRevokeAccessResponse(t) + + c := client.ServiceClient() + // Client c must have Microversion set; minimum supported microversion for Revoke Access is 2.7 + c.Microversion = "2.7" + + options := &shares.RevokeAccessOpts{AccessID: 
"a2f226a5-cee8-430b-8a03-78a59bd84ee8"} + + err := shares.RevokeAccess(c, shareID, options).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestListAccessRightsSuccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListAccessRightsResponse(t) + + c := client.ServiceClient() + // Client c must have Microversion set; minimum supported microversion for Grant Access is 2.7 + c.Microversion = "2.7" + + s, err := shares.ListAccessRights(c, shareID).Extract() + + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, s, []shares.AccessRight{ + { + ShareID: "011d21e2-fbc3-4e4a-9993-9ea223f73264", + AccessType: "ip", + AccessTo: "0.0.0.0/0", + AccessKey: "", + AccessLevel: "rw", + State: "new", + ID: "a2f226a5-cee8-430b-8a03-78a59bd84ee8", + }, + }) +} + +func TestExtendSuccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockExtendResponse(t) + + c := client.ServiceClient() + // Client c must have Microversion set; minimum supported microversion for Grant Access is 2.7 + c.Microversion = "2.7" + + err := shares.Extend(c, shareID, &shares.ExtendOpts{NewSize: 2}).ExtractErr() + th.AssertNoErr(t, err) +} + +func TestShrinkSuccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockShrinkResponse(t) + + c := client.ServiceClient() + // Client c must have Microversion set; minimum supported microversion for Grant Access is 2.7 + c.Microversion = "2.7" + + err := shares.Shrink(c, shareID, &shares.ShrinkOpts{NewSize: 1}).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go new file mode 100644 index 000000000000..f50589e7d360 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/shares/urls.go @@ -0,0 +1,43 @@ +package shares + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("shares") +} + +func listDetailURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("shares", "detail") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id) +} + +func getURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id) +} + +func getExportLocationsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "export_locations") +} + +func grantAccessURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func revokeAccessURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func listAccessRightsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func extendURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} + +func shrinkURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("shares", id, "action") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/requests.go new file mode 100644 index 000000000000..c2c2049b9fbb --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/requests.go @@ -0,0 +1,210 @@ +package sharetypes + +import ( + 
"github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// CreateOptsBuilder allows extensions to add additional parameters to the +// Create request. +type CreateOptsBuilder interface { + ToShareTypeCreateMap() (map[string]interface{}, error) +} + +// CreateOpts contains options for creating a ShareType. This object is +// passed to the sharetypes.Create function. For more information about +// these parameters, see the ShareType object. +type CreateOpts struct { + // The share type name + Name string `json:"name" required:"true"` + // Indicates whether a share type is publicly accessible + IsPublic bool `json:"os-share-type-access:is_public"` + // The extra specifications for the share type + ExtraSpecs ExtraSpecsOpts `json:"extra_specs" required:"true"` +} + +// ExtraSpecsOpts represent the extra specifications that can be selected for a share type +type ExtraSpecsOpts struct { + // An extra specification that defines the driver mode for share server, or storage, life cycle management + DriverHandlesShareServers bool `json:"driver_handles_share_servers" required:"true"` + // An extra specification that filters back ends by whether they do or do not support share snapshots + SnapshotSupport *bool `json:"snapshot_support,omitempty"` +} + +// ToShareTypeCreateMap assembles a request body based on the contents of a +// CreateOpts. +func (opts CreateOpts) ToShareTypeCreateMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "share_type") +} + +// Create will create a new ShareType based on the values in CreateOpts. To +// extract the ShareType object from the response, call the Extract method +// on the CreateResult. +func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + b, err := opts.ToShareTypeCreateMap() + if err != nil { + r.Err = err + return + } + _, r.Err = client.Post(createURL(client), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// Delete will delete the existing ShareType with the provided ID. +func Delete(client *gophercloud.ServiceClient, id string) (r DeleteResult) { + _, r.Err = client.Delete(deleteURL(client, id), nil) + return +} + +// ListOptsBuilder allows extensions to add additional parameters to the List +// request. +type ListOptsBuilder interface { + ToShareTypeListQuery() (string, error) +} + +// ListOpts holds options for listing ShareTypes. It is passed to the +// sharetypes.List function. +type ListOpts struct { + // Select if public types, private types, or both should be listed + IsPublic string `q:"is_public"` +} + +// ToShareTypeListQuery formats a ListOpts into a query string. +func (opts ListOpts) ToShareTypeListQuery() (string, error) { + q, err := gophercloud.BuildQueryString(opts) + return q.String(), err +} + +// List returns ShareTypes optionally limited by the conditions provided in ListOpts. +func List(client *gophercloud.ServiceClient, opts ListOptsBuilder) pagination.Pager { + url := listURL(client) + if opts != nil { + query, err := opts.ToShareTypeListQuery() + if err != nil { + return pagination.Pager{Err: err} + } + url += query + } + + return pagination.NewPager(client, url, func(r pagination.PageResult) pagination.Page { + return ShareTypePage{pagination.SinglePageBase(r)} + }) +} + +// GetDefault will retrieve the default ShareType. 
+func GetDefault(client *gophercloud.ServiceClient) (r GetDefaultResult) { + _, r.Err = client.Get(getDefaultURL(client), &r.Body, nil) + return +} + +// GetExtraSpecs will retrieve the extra specifications for a given ShareType. +func GetExtraSpecs(client *gophercloud.ServiceClient, id string) (r GetExtraSpecsResult) { + _, r.Err = client.Get(getExtraSpecsURL(client, id), &r.Body, nil) + return +} + +// SetExtraSpecsOptsBuilder allows extensions to add additional parameters to the +// SetExtraSpecs request. +type SetExtraSpecsOptsBuilder interface { + ToShareTypeSetExtraSpecsMap() (map[string]interface{}, error) +} + +type SetExtraSpecsOpts struct { + // A list of all extra specifications to be added to a ShareType + ExtraSpecs map[string]interface{} `json:"extra_specs" required:"true"` +} + +// ToShareTypeSetExtraSpecsMap assembles a request body based on the contents of a +// SetExtraSpecsOpts. +func (opts SetExtraSpecsOpts) ToShareTypeSetExtraSpecsMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "") +} + +// SetExtraSpecs will set new specifications for a ShareType based on the values +// in SetExtraSpecsOpts. To extract the extra specifications object from the response, +// call the Extract method on the SetExtraSpecsResult. +func SetExtraSpecs(client *gophercloud.ServiceClient, id string, opts SetExtraSpecsOptsBuilder) (r SetExtraSpecsResult) { + b, err := opts.ToShareTypeSetExtraSpecsMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(setExtraSpecsURL(client, id), b, &r.Body, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// UnsetExtraSpecs will unset an extra specification for an existing ShareType. +func UnsetExtraSpecs(client *gophercloud.ServiceClient, id string, key string) (r UnsetExtraSpecsResult) { + _, r.Err = client.Delete(unsetExtraSpecsURL(client, id, key), nil) + return +} + +// ShowAccess will show access details for an existing ShareType. +func ShowAccess(client *gophercloud.ServiceClient, id string) (r ShowAccessResult) { + _, r.Err = client.Get(showAccessURL(client, id), &r.Body, nil) + return +} + +// AddAccessOptsBuilder allows extensions to add additional parameters to the +// AddAccess +type AddAccessOptsBuilder interface { + ToAddAccessMap() (map[string]interface{}, error) +} + +type AccessOpts struct { + // The UUID of the project to which access to the share type is granted. + Project string `json:"project"` +} + +// ToAddAccessMap assembles a request body based on the contents of a +// AccessOpts. +func (opts AccessOpts) ToAddAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "addProjectAccess") +} + +// AddAccess will add access to a ShareType based on the values +// in AccessOpts. +func AddAccess(client *gophercloud.ServiceClient, id string, opts AddAccessOptsBuilder) (r AddAccessResult) { + b, err := opts.ToAddAccessMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(addAccessURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} + +// RemoveAccessOptsBuilder allows extensions to add additional parameters to the +// RemoveAccess +type RemoveAccessOptsBuilder interface { + ToRemoveAccessMap() (map[string]interface{}, error) +} + +// ToRemoveAccessMap assembles a request body based on the contents of a +// AccessOpts. 
+func (opts AccessOpts) ToRemoveAccessMap() (map[string]interface{}, error) { + return gophercloud.BuildRequestBody(opts, "removeProjectAccess") +} + +// RemoveAccess will remove access to a ShareType based on the values +// in AccessOpts. +func RemoveAccess(client *gophercloud.ServiceClient, id string, opts RemoveAccessOptsBuilder) (r RemoveAccessResult) { + b, err := opts.ToRemoveAccessMap() + if err != nil { + r.Err = err + return + } + + _, r.Err = client.Post(removeAccessURL(client, id), b, nil, &gophercloud.RequestOpts{ + OkCodes: []int{200, 202}, + }) + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/results.go new file mode 100644 index 000000000000..f60d757766d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/results.go @@ -0,0 +1,139 @@ +package sharetypes + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/pagination" +) + +// ShareType contains all the information associated with an OpenStack +// ShareType. +type ShareType struct { + // The Share Type ID + ID string `json:"id"` + // The Share Type name + Name string `json:"name"` + // Indicates whether a share type is publicly accessible + IsPublic bool `json:"os-share-type-access:is_public"` + // The required extra specifications for the share type + RequiredExtraSpecs map[string]interface{} `json:"required_extra_specs"` + // The extra specifications for the share type + ExtraSpecs map[string]interface{} `json:"extra_specs"` +} + +type commonResult struct { + gophercloud.Result +} + +// Extract will get the ShareType object out of the commonResult object. +func (r commonResult) Extract() (*ShareType, error) { + var s struct { + ShareType *ShareType `json:"share_type"` + } + err := r.ExtractInto(&s) + return s.ShareType, err +} + +// CreateResult contains the response body and error from a Create request. +type CreateResult struct { + commonResult +} + +// DeleteResult contains the response body and error from a Delete request. +type DeleteResult struct { + gophercloud.ErrResult +} + +// ShareTypePage is a pagination.Pager that is returned from a call to the List function. +type ShareTypePage struct { + pagination.SinglePageBase +} + +// IsEmpty returns true if a ListResult contains no ShareTypes. +func (r ShareTypePage) IsEmpty() (bool, error) { + shareTypes, err := ExtractShareTypes(r) + return len(shareTypes) == 0, err +} + +// ExtractShareTypes extracts and returns ShareTypes. It is used while +// iterating over a sharetypes.List call. +func ExtractShareTypes(r pagination.Page) ([]ShareType, error) { + var s struct { + ShareTypes []ShareType `json:"share_types"` + } + err := (r.(ShareTypePage)).ExtractInto(&s) + return s.ShareTypes, err +} + +// GetDefaultResult contains the response body and error from a Get Default request. +type GetDefaultResult struct { + commonResult +} + +// ExtraSpecs contains all the information associated with extra specifications +// for an OpenStack ShareType. +type ExtraSpecs map[string]interface{} + +type extraSpecsResult struct { + gophercloud.Result +} + +// Extract will get the ExtraSpecs object out of the extraSpecsResult object. 
+func (r extraSpecsResult) Extract() (ExtraSpecs, error) { + var s struct { + Specs ExtraSpecs `json:"extra_specs"` + } + err := r.ExtractInto(&s) + return s.Specs, err +} + +// GetExtraSpecsResult contains the response body and error from a Get Extra Specs request. +type GetExtraSpecsResult struct { + extraSpecsResult +} + +// SetExtraSpecsResult contains the response body and error from a Set Extra Specs request. +type SetExtraSpecsResult struct { + extraSpecsResult +} + +// UnsetExtraSpecsResult contains the response body and error from a Unset Extra Specs request. +type UnsetExtraSpecsResult struct { + gophercloud.ErrResult +} + +// ShareTypeAccess contains all the information associated with an OpenStack +// ShareTypeAccess. +type ShareTypeAccess struct { + // The share type ID of the member. + ShareTypeID string `json:"share_type_id"` + // The UUID of the project for which access to the share type is granted. + ProjectID string `json:"project_id"` +} + +type shareTypeAccessResult struct { + gophercloud.Result +} + +// ShowAccessResult contains the response body and error from a Show access request. +type ShowAccessResult struct { + shareTypeAccessResult +} + +// Extract will get the ShareTypeAccess objects out of the shareTypeAccessResult object. +func (r ShowAccessResult) Extract() ([]ShareTypeAccess, error) { + var s struct { + ShareTypeAccess []ShareTypeAccess `json:"share_type_access"` + } + err := r.ExtractInto(&s) + return s.ShareTypeAccess, err +} + +// AddAccessResult contains the response body and error from a Add Access request. +type AddAccessResult struct { + gophercloud.ErrResult +} + +// RemoveAccessResult contains the response body and error from a Remove Access request. +type RemoveAccessResult struct { + gophercloud.ErrResult +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/testing/fixtures.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/testing/fixtures.go new file mode 100644 index 000000000000..7ba85ed1890d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/testing/fixtures.go @@ -0,0 +1,272 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func MockCreateResponse(t *testing.T) { + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "share_type": { + "os-share-type-access:is_public": true, + "extra_specs": { + "driver_handles_share_servers": true, + "snapshot_support": true + }, + "name": "my_new_share_type" + } + }`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` + { + "volume_type": { + "os-share-type-access:is_public": true, + "required_extra_specs": { + "driver_handles_share_servers": true + }, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "True" + }, + "name": "my_new_share_type", + "id": "1d600d02-26a7-4b23-af3d-7d51860fe858" + }, + "share_type": { + "os-share-type-access:is_public": true, + "required_extra_specs": { + "driver_handles_share_servers": true + }, + "extra_specs": { + "snapshot_support": "True", + 
"driver_handles_share_servers": "True" + }, + "name": "my_new_share_type", + "id": "1d600d02-26a7-4b23-af3d-7d51860fe858" + } + }`) + }) +} + +func MockDeleteResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockListResponse(t *testing.T) { + th.Mux.HandleFunc("/types", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + + fmt.Fprintf(w, ` + { + "volume_types": [ + { + "os-share-type-access:is_public": true, + "required_extra_specs": { + "driver_handles_share_servers": "True" + }, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "True" + }, + "name": "default", + "id": "be27425c-f807-4500-a056-d00721db45cf" + }, + { + "os-share-type-access:is_public": true, + "required_extra_specs": { + "driver_handles_share_servers": "false" + }, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "false" + }, + "name": "d", + "id": "f015bebe-c38b-4c49-8832-00143b10253b" + } + ], + "share_types": [ + { + "os-share-type-access:is_public": true, + "required_extra_specs": { + "driver_handles_share_servers": "True" + }, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "True" + }, + "name": "default", + "id": "be27425c-f807-4500-a056-d00721db45cf" + }, + { + "os-share-type-access:is_public": true, + "required_extra_specs": { + "driver_handles_share_servers": "false" + }, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "false" + }, + "name": "d", + "id": "f015bebe-c38b-4c49-8832-00143b10253b" + } + ] + }`) + }) +} + +func MockGetDefaultResponse(t *testing.T) { + th.Mux.HandleFunc("/types/default", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "volume_type": { + "required_extra_specs": null, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "True" + }, + "name": "default", + "id": "be27425c-f807-4500-a056-d00721db45cf" + }, + "share_type": { + "required_extra_specs": null, + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "True" + }, + "name": "default", + "id": "be27425c-f807-4500-a056-d00721db45cf" + } + }`) + }) +} + +func MockGetExtraSpecsResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID/extra_specs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "extra_specs": { + "snapshot_support": "True", + "driver_handles_share_servers": "True", + "my_custom_extra_spec": "False" + } + }`) + }) +} + +func MockSetExtraSpecsResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID/extra_specs", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + 
"extra_specs": { + "my_key": "my_value" + } + }`) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusAccepted) + + fmt.Fprintf(w, ` + { + "extra_specs": { + "my_key": "my_value" + } + }`) + }) +} + +func MockUnsetExtraSpecsResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID/extra_specs/my_key", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "DELETE") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockShowAccessResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID/share_type_access", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "GET") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + + w.Header().Add("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, ` + { + "share_type_access": [ + { + "share_type_id": "1732f284-401d-41d9-a494-425451e8b4b8", + "project_id": "818a3f48dcd644909b3fa2e45a399a27" + }, + { + "share_type_id": "1732f284-401d-41d9-a494-425451e8b4b8", + "project_id": "e1284adea3ee4d2482af5ed214f3ad90" + } + ] + }`) + }) +} + +func MockAddAccessResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "addProjectAccess": { + "project": "e1284adea3ee4d2482af5ed214f3ad90" + } + }`) + w.WriteHeader(http.StatusAccepted) + }) +} + +func MockRemoveAccessResponse(t *testing.T) { + th.Mux.HandleFunc("/types/shareTypeID/action", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "application/json") + th.TestHeader(t, r, "Accept", "application/json") + th.TestJSONRequest(t, r, ` + { + "removeProjectAccess": { + "project": "e1284adea3ee4d2482af5ed214f3ad90" + } + }`) + w.WriteHeader(http.StatusAccepted) + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/testing/requests_test.go new file mode 100644 index 000000000000..85a9e5a974f7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/testing/requests_test.go @@ -0,0 +1,217 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +// Verifies that a share type can be created correctly +func TestCreate(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockCreateResponse(t) + + snapshotSupport := true + extraSpecs := sharetypes.ExtraSpecsOpts{ + DriverHandlesShareServers: true, + SnapshotSupport: &snapshotSupport, + } + + options := &sharetypes.CreateOpts{ + Name: "my_new_share_type", + IsPublic: true, + ExtraSpecs: extraSpecs, + } + + st, err := sharetypes.Create(client.ServiceClient(), options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, st.Name, "my_new_share_type") + th.AssertEquals(t, st.IsPublic, true) +} + +// Verifies that a share type can't be created if the required parameters are missing 
+func TestCreateFails(t *testing.T) { + options := &sharetypes.CreateOpts{ + Name: "my_new_share_type", + } + + _, err := sharetypes.Create(client.ServiceClient(), options).Extract() + if _, ok := err.(gophercloud.ErrMissingInput); !ok { + t.Fatal("ErrMissingInput was expected to occur") + } + + extraSpecs := sharetypes.ExtraSpecsOpts{ + DriverHandlesShareServers: true, + } + + options = &sharetypes.CreateOpts{ + ExtraSpecs: extraSpecs, + } + + _, err = sharetypes.Create(client.ServiceClient(), options).Extract() + if _, ok := err.(gophercloud.ErrMissingInput); !ok { + t.Fatal("ErrMissingInput was expected to occur") + } +} + +// Verifies that share type deletion works +func TestDelete(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockDeleteResponse(t) + res := sharetypes.Delete(client.ServiceClient(), "shareTypeID") + th.AssertNoErr(t, res.Err) +} + +// Verifies that share types can be listed correctly +func TestList(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockListResponse(t) + + allPages, err := sharetypes.List(client.ServiceClient(), &sharetypes.ListOpts{}).AllPages() + th.AssertNoErr(t, err) + actual, err := sharetypes.ExtractShareTypes(allPages) + th.AssertNoErr(t, err) + expected := []sharetypes.ShareType{ + { + ID: "be27425c-f807-4500-a056-d00721db45cf", + Name: "default", + IsPublic: true, + ExtraSpecs: map[string]interface{}{"snapshot_support": "True", "driver_handles_share_servers": "True"}, + RequiredExtraSpecs: map[string]interface{}{"driver_handles_share_servers": "True"}, + }, + { + ID: "f015bebe-c38b-4c49-8832-00143b10253b", + Name: "d", + IsPublic: true, + ExtraSpecs: map[string]interface{}{"driver_handles_share_servers": "false", "snapshot_support": "True"}, + RequiredExtraSpecs: map[string]interface{}{"driver_handles_share_servers": "false"}, + }, + } + + th.CheckDeepEquals(t, expected, actual) +} + +// Verifies that it is possible to get the default share type +func TestGetDefault(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetDefaultResponse(t) + + expected := sharetypes.ShareType{ + ID: "be27425c-f807-4500-a056-d00721db45cf", + Name: "default", + ExtraSpecs: map[string]interface{}{"snapshot_support": "True", "driver_handles_share_servers": "True"}, + RequiredExtraSpecs: map[string]interface{}(nil), + } + + actual, err := sharetypes.GetDefault(client.ServiceClient()).Extract() + th.AssertNoErr(t, err) + th.CheckDeepEquals(t, &expected, actual) +} + +// Verifies that it is possible to get the extra specifications for a share type +func TestGetExtraSpecs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockGetExtraSpecsResponse(t) + + st, err := sharetypes.GetExtraSpecs(client.ServiceClient(), "shareTypeID").Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, st["snapshot_support"], "True") + th.AssertEquals(t, st["driver_handles_share_servers"], "True") + th.AssertEquals(t, st["my_custom_extra_spec"], "False") +} + +// Verifies that an extra specs can be added to a share type +func TestSetExtraSpecs(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockSetExtraSpecsResponse(t) + + options := &sharetypes.SetExtraSpecsOpts{ + ExtraSpecs: map[string]interface{}{"my_key": "my_value"}, + } + + es, err := sharetypes.SetExtraSpecs(client.ServiceClient(), "shareTypeID", options).Extract() + th.AssertNoErr(t, err) + + th.AssertEquals(t, es["my_key"], "my_value") +} + +// Verifies that an extra specification can be unset for a share type +func TestUnsetExtraSpecs(t *testing.T) { + 
th.SetupHTTP() + defer th.TeardownHTTP() + + MockUnsetExtraSpecsResponse(t) + res := sharetypes.UnsetExtraSpecs(client.ServiceClient(), "shareTypeID", "my_key") + th.AssertNoErr(t, res.Err) +} + +// Verifies that it is possible to see the access for a share type +func TestShowAccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockShowAccessResponse(t) + + expected := []sharetypes.ShareTypeAccess{ + { + ShareTypeID: "1732f284-401d-41d9-a494-425451e8b4b8", + ProjectID: "818a3f48dcd644909b3fa2e45a399a27", + }, + { + ShareTypeID: "1732f284-401d-41d9-a494-425451e8b4b8", + ProjectID: "e1284adea3ee4d2482af5ed214f3ad90", + }, + } + + shareType, err := sharetypes.ShowAccess(client.ServiceClient(), "shareTypeID").Extract() + th.AssertNoErr(t, err) + + th.CheckDeepEquals(t, expected, shareType) +} + +// Verifies that an access can be added to a share type +func TestAddAccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockAddAccessResponse(t) + + options := &sharetypes.AccessOpts{ + Project: "e1284adea3ee4d2482af5ed214f3ad90", + } + + err := sharetypes.AddAccess(client.ServiceClient(), "shareTypeID", options).ExtractErr() + th.AssertNoErr(t, err) +} + +// Verifies that an access can be removed from a share type +func TestRemoveAccess(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + MockRemoveAccessResponse(t) + + options := &sharetypes.AccessOpts{ + Project: "e1284adea3ee4d2482af5ed214f3ad90", + } + + err := sharetypes.RemoveAccess(client.ServiceClient(), "shareTypeID", options).ExtractErr() + th.AssertNoErr(t, err) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/urls.go new file mode 100644 index 000000000000..42779b1f97d6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/sharedfilesystems/v2/sharetypes/urls.go @@ -0,0 +1,43 @@ +package sharetypes + +import "github.com/gophercloud/gophercloud" + +func createURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("types") +} + +func deleteURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id) +} + +func listURL(c *gophercloud.ServiceClient) string { + return createURL(c) +} + +func getDefaultURL(c *gophercloud.ServiceClient) string { + return c.ServiceURL("types", "default") +} + +func getExtraSpecsURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id, "extra_specs") +} + +func setExtraSpecsURL(c *gophercloud.ServiceClient, id string) string { + return getExtraSpecsURL(c, id) +} + +func unsetExtraSpecsURL(c *gophercloud.ServiceClient, id string, key string) string { + return c.ServiceURL("types", id, "extra_specs", key) +} + +func showAccessURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id, "share_type_access") +} + +func addAccessURL(c *gophercloud.ServiceClient, id string) string { + return c.ServiceURL("types", id, "action") +} + +func removeAccessURL(c *gophercloud.ServiceClient, id string) string { + return addAccessURL(c, id) +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/testing/client_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/testing/client_test.go new file mode 100644 index 000000000000..6f94e44e0d8b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/testing/client_test.go @@ -0,0 +1,315 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" 
+ + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + th "github.com/gophercloud/gophercloud/testhelper" +) + +const ID = "0123456789" + +func TestAuthenticatedClientV3(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "stable", + "id": "v3.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + } + ] + } + } + `, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/") + }) + + th.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Subject-Token", ID) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, `{ "token": { "expires_at": "2013-02-02T18:30:59.000000Z" } }`) + }) + + options := gophercloud.AuthOptions{ + Username: "me", + Password: "secret", + DomainName: "default", + TenantName: "project", + IdentityEndpoint: th.Endpoint(), + } + client, err := openstack.AuthenticatedClient(options) + th.AssertNoErr(t, err) + th.CheckEquals(t, ID, client.TokenID) +} + +func TestAuthenticatedClientV2(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "experimental", + "id": "v3.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + } + ] + } + } + `, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/") + }) + + th.Mux.HandleFunc("/v2.0/tokens", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "access": { + "token": { + "id": "01234567890", + "expires": "2014-10-01T10:00:00.000000Z" + }, + "serviceCatalog": [ + { + "name": "Cloud Servers", + "type": "compute", + "endpoints": [ + { + "tenantId": "t1000", + "publicURL": "https://compute.north.host.com/v1/t1000", + "internalURL": "https://compute.north.internal/v1/t1000", + "region": "North", + "versionId": "1", + "versionInfo": "https://compute.north.host.com/v1/", + "versionList": "https://compute.north.host.com/" + }, + { + "tenantId": "t1000", + "publicURL": "https://compute.north.host.com/v1.1/t1000", + "internalURL": "https://compute.north.internal/v1.1/t1000", + "region": "North", + "versionId": "1.1", + "versionInfo": "https://compute.north.host.com/v1.1/", + "versionList": "https://compute.north.host.com/" + } + ], + "endpoints_links": [] + }, + { + "name": "Cloud Files", + "type": "object-store", + "endpoints": [ + { + "tenantId": "t1000", + "publicURL": "https://storage.north.host.com/v1/t1000", + "internalURL": "https://storage.north.internal/v1/t1000", + "region": "North", + "versionId": "1", + "versionInfo": "https://storage.north.host.com/v1/", + "versionList": "https://storage.north.host.com/" + }, + { + "tenantId": "t1000", + "publicURL": "https://storage.south.host.com/v1/t1000", + "internalURL": "https://storage.south.internal/v1/t1000", + "region": "South", + "versionId": "1", + "versionInfo": "https://storage.south.host.com/v1/", + "versionList": "https://storage.south.host.com/" + } + ] + } + ] + } + } + `) + }) + + options := gophercloud.AuthOptions{ + Username: "me", + Password: "secret", + IdentityEndpoint: th.Endpoint(), + } + client, err := openstack.AuthenticatedClient(options) + th.AssertNoErr(t, err) + th.CheckEquals(t, "01234567890", 
client.TokenID) +} + +func TestIdentityAdminV3Client(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "stable", + "id": "v3.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s", "rel": "self" } + ] + } + ] + } + } + `, th.Endpoint()+"v3/", th.Endpoint()+"v2.0/") + }) + + th.Mux.HandleFunc("/v3/auth/tokens", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("X-Subject-Token", ID) + + w.WriteHeader(http.StatusCreated) + fmt.Fprintf(w, ` + { + "token": { + "audit_ids": ["VcxU2JYqT8OzfUVvrjEITQ", "qNUTIJntTzO1-XUk5STybw"], + "catalog": [ + { + "endpoints": [ + { + "id": "39dc322ce86c4111b4f06c2eeae0841b", + "interface": "public", + "region": "RegionOne", + "url": "http://localhost:5000" + }, + { + "id": "ec642f27474842e78bf059f6c48f4e99", + "interface": "internal", + "region": "RegionOne", + "url": "http://localhost:5000" + }, + { + "id": "c609fc430175452290b62a4242e8a7e8", + "interface": "admin", + "region": "RegionOne", + "url": "http://localhost:35357" + } + ], + "id": "4363ae44bdf34a3981fde3b823cb9aa2", + "type": "identity", + "name": "keystone" + } + ], + "expires_at": "2013-02-27T18:30:59.999999Z", + "is_domain": false, + "issued_at": "2013-02-27T16:30:59.999999Z", + "methods": [ + "password" + ], + "project": { + "domain": { + "id": "1789d1", + "name": "example.com" + }, + "id": "263fd9", + "name": "project-x" + }, + "roles": [ + { + "id": "76e72a", + "name": "admin" + }, + { + "id": "f4f392", + "name": "member" + } + ], + "service_providers": [ + { + "auth_url":"https://example.com:5000/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", + "id": "sp1", + "sp_url": "https://example.com:5000/Shibboleth.sso/SAML2/ECP" + }, + { + "auth_url":"https://other.example.com:5000/v3/OS-FEDERATION/identity_providers/acme/protocols/saml2/auth", + "id": "sp2", + "sp_url": "https://other.example.com:5000/Shibboleth.sso/SAML2/ECP" + } + ], + "user": { + "domain": { + "id": "1789d1", + "name": "example.com" + }, + "id": "0ca8f6", + "name": "Joe", + "password_expires_at": "2016-11-06T15:32:17.000000" + } + } +} + `) + }) + + options := gophercloud.AuthOptions{ + Username: "me", + Password: "secret", + DomainID: "12345", + IdentityEndpoint: th.Endpoint(), + } + pc, err := openstack.AuthenticatedClient(options) + th.AssertNoErr(t, err) + sc, err := openstack.NewIdentityV3(pc, gophercloud.EndpointOpts{ + Availability: gophercloud.AvailabilityAdmin, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, "http://localhost:35357/v3/", sc.Endpoint) +} + +func testAuthenticatedClientFails(t *testing.T, endpoint string) { + options := gophercloud.AuthOptions{ + Username: "me", + Password: "secret", + DomainName: "default", + TenantName: "project", + IdentityEndpoint: endpoint, + } + _, err := openstack.AuthenticatedClient(options) + if err == nil { + t.Fatal("expected error but call succeeded") + } +} + +func TestAuthenticatedClientV3Fails(t *testing.T) { + testAuthenticatedClientFails(t, "http://bad-address.example.com/v3") +} + +func TestAuthenticatedClientV2Fails(t *testing.T) { + testAuthenticatedClientFails(t, "http://bad-address.example.com/v2.0") +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/testing/doc.go new file mode 100644 index 000000000000..34cfe7ac6d84 --- 
/dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/testing/doc.go @@ -0,0 +1,2 @@ +// openstack +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/testing/endpoint_location_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/testing/endpoint_location_test.go new file mode 100644 index 000000000000..5ac773eee62c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/testing/endpoint_location_test.go @@ -0,0 +1,274 @@ +package testing + +import ( + "strings" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack" + tokens2 "github.com/gophercloud/gophercloud/openstack/identity/v2/tokens" + tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens" + th "github.com/gophercloud/gophercloud/testhelper" +) + +// Service catalog fixtures take too much vertical space! +var catalog2 = tokens2.ServiceCatalog{ + Entries: []tokens2.CatalogEntry{ + tokens2.CatalogEntry{ + Type: "same", + Name: "same", + Endpoints: []tokens2.Endpoint{ + tokens2.Endpoint{ + Region: "same", + PublicURL: "https://public.correct.com/", + InternalURL: "https://internal.correct.com/", + AdminURL: "https://admin.correct.com/", + }, + tokens2.Endpoint{ + Region: "different", + PublicURL: "https://badregion.com/", + }, + }, + }, + tokens2.CatalogEntry{ + Type: "same", + Name: "different", + Endpoints: []tokens2.Endpoint{ + tokens2.Endpoint{ + Region: "same", + PublicURL: "https://badname.com/", + }, + tokens2.Endpoint{ + Region: "different", + PublicURL: "https://badname.com/+badregion", + }, + }, + }, + tokens2.CatalogEntry{ + Type: "different", + Name: "different", + Endpoints: []tokens2.Endpoint{ + tokens2.Endpoint{ + Region: "same", + PublicURL: "https://badtype.com/+badname", + }, + tokens2.Endpoint{ + Region: "different", + PublicURL: "https://badtype.com/+badregion+badname", + }, + }, + }, + }, +} + +func TestV2EndpointExact(t *testing.T) { + expectedURLs := map[gophercloud.Availability]string{ + gophercloud.AvailabilityPublic: "https://public.correct.com/", + gophercloud.AvailabilityAdmin: "https://admin.correct.com/", + gophercloud.AvailabilityInternal: "https://internal.correct.com/", + } + + for availability, expected := range expectedURLs { + actual, err := openstack.V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: availability, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) + } +} + +func TestV2EndpointNone(t *testing.T) { + _, actual := openstack.V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "nope", + Availability: gophercloud.AvailabilityPublic, + }) + expected := &gophercloud.ErrEndpointNotFound{} + th.CheckEquals(t, expected.Error(), actual.Error()) +} + +func TestV2EndpointMultiple(t *testing.T) { + _, err := openstack.V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "same", + Region: "same", + Availability: gophercloud.AvailabilityPublic, + }) + if !strings.HasPrefix(err.Error(), "Discovered 2 matching endpoints:") { + t.Errorf("Received unexpected error: %v", err) + } +} + +func TestV2EndpointBadAvailability(t *testing.T) { + _, err := openstack.V2EndpointURL(&catalog2, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: "wat", + }) + th.CheckEquals(t, "Unexpected availability in endpoint query: wat", err.Error()) +} + +var catalog3 = tokens3.ServiceCatalog{ + Entries: []tokens3.CatalogEntry{ + tokens3.CatalogEntry{ + Type: 
"same", + Name: "same", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "1", + Region: "same", + Interface: "public", + URL: "https://public.correct.com/", + }, + tokens3.Endpoint{ + ID: "2", + Region: "same", + Interface: "admin", + URL: "https://admin.correct.com/", + }, + tokens3.Endpoint{ + ID: "3", + Region: "same", + Interface: "internal", + URL: "https://internal.correct.com/", + }, + tokens3.Endpoint{ + ID: "4", + Region: "different", + Interface: "public", + URL: "https://badregion.com/", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "same", + Name: "different", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "5", + Region: "same", + Interface: "public", + URL: "https://badname.com/", + }, + tokens3.Endpoint{ + ID: "6", + Region: "different", + Interface: "public", + URL: "https://badname.com/+badregion", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "different", + Name: "different", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "7", + Region: "same", + Interface: "public", + URL: "https://badtype.com/+badname", + }, + tokens3.Endpoint{ + ID: "8", + Region: "different", + Interface: "public", + URL: "https://badtype.com/+badregion+badname", + }, + }, + }, + tokens3.CatalogEntry{ + Type: "someother", + Name: "someother", + Endpoints: []tokens3.Endpoint{ + tokens3.Endpoint{ + ID: "1", + Region: "someother", + Interface: "public", + URL: "https://public.correct.com/", + }, + tokens3.Endpoint{ + ID: "2", + RegionID: "someother", + Interface: "admin", + URL: "https://admin.correct.com/", + }, + tokens3.Endpoint{ + ID: "3", + RegionID: "someother", + Interface: "internal", + URL: "https://internal.correct.com/", + }, + }, + }, + }, +} + +func TestV3EndpointExact(t *testing.T) { + expectedURLs := map[gophercloud.Availability]string{ + gophercloud.AvailabilityPublic: "https://public.correct.com/", + gophercloud.AvailabilityAdmin: "https://admin.correct.com/", + gophercloud.AvailabilityInternal: "https://internal.correct.com/", + } + + for availability, expected := range expectedURLs { + actual, err := openstack.V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: availability, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) + } +} + +func TestV3EndpointNone(t *testing.T) { + _, actual := openstack.V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "nope", + Availability: gophercloud.AvailabilityPublic, + }) + expected := &gophercloud.ErrEndpointNotFound{} + th.CheckEquals(t, expected.Error(), actual.Error()) +} + +func TestV3EndpointMultiple(t *testing.T) { + _, err := openstack.V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Region: "same", + Availability: gophercloud.AvailabilityPublic, + }) + if !strings.HasPrefix(err.Error(), "Discovered 2 matching endpoints:") { + t.Errorf("Received unexpected error: %v", err) + } +} + +func TestV3EndpointBadAvailability(t *testing.T) { + _, err := openstack.V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "same", + Name: "same", + Region: "same", + Availability: "wat", + }) + th.CheckEquals(t, "Unexpected availability in endpoint query: wat", err.Error()) +} + +func TestV3EndpointWithRegionID(t *testing.T) { + expectedURLs := map[gophercloud.Availability]string{ + gophercloud.AvailabilityPublic: "https://public.correct.com/", + gophercloud.AvailabilityAdmin: "https://admin.correct.com/", + gophercloud.AvailabilityInternal: "https://internal.correct.com/", + } + + for availability, expected := 
range expectedURLs { + actual, err := openstack.V3EndpointURL(&catalog3, gophercloud.EndpointOpts{ + Type: "someother", + Name: "someother", + Region: "someother", + Availability: availability, + }) + th.AssertNoErr(t, err) + th.CheckEquals(t, expected, actual) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go new file mode 100644 index 000000000000..40080f7af203 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/base_endpoint.go @@ -0,0 +1,28 @@ +package utils + +import ( + "net/url" + "regexp" + "strings" +) + +// BaseEndpoint will return a URL without the /vX.Y +// portion of the URL. +func BaseEndpoint(endpoint string) (string, error) { + u, err := url.Parse(endpoint) + if err != nil { + return "", err + } + + u.RawQuery, u.Fragment = "", "" + + path := u.Path + versionRe := regexp.MustCompile("v[0-9.]+/?") + + if version := versionRe.FindString(path); version != "" { + versionIndex := strings.Index(path, version) + u.Path = path[:versionIndex] + } + + return u.String(), nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go new file mode 100644 index 000000000000..27da19f91a8d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/choose_version.go @@ -0,0 +1,111 @@ +package utils + +import ( + "fmt" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// Version is a supported API version, corresponding to a vN package within the appropriate service. +type Version struct { + ID string + Suffix string + Priority int +} + +var goodStatus = map[string]bool{ + "current": true, + "supported": true, + "stable": true, +} + +// ChooseVersion queries the base endpoint of an API to choose the most recent non-experimental alternative from a service's +// published versions. +// It returns the highest-Priority Version among the alternatives that are provided, as well as its corresponding endpoint. +func ChooseVersion(client *gophercloud.ProviderClient, recognized []*Version) (*Version, string, error) { + type linkResp struct { + Href string `json:"href"` + Rel string `json:"rel"` + } + + type valueResp struct { + ID string `json:"id"` + Status string `json:"status"` + Links []linkResp `json:"links"` + } + + type versionsResp struct { + Values []valueResp `json:"values"` + } + + type response struct { + Versions versionsResp `json:"versions"` + } + + normalize := func(endpoint string) string { + if !strings.HasSuffix(endpoint, "/") { + return endpoint + "/" + } + return endpoint + } + identityEndpoint := normalize(client.IdentityEndpoint) + + // If a full endpoint is specified, check version suffixes for a match first. + for _, v := range recognized { + if strings.HasSuffix(identityEndpoint, v.Suffix) { + return v, identityEndpoint, nil + } + } + + var resp response + _, err := client.Request("GET", client.IdentityBase, &gophercloud.RequestOpts{ + JSONResponse: &resp, + OkCodes: []int{200, 300}, + }) + + if err != nil { + return nil, "", err + } + + var highest *Version + var endpoint string + + for _, value := range resp.Versions.Values { + href := "" + for _, link := range value.Links { + if link.Rel == "self" { + href = normalize(link.Href) + } + } + + for _, version := range recognized { + if strings.Contains(value.ID, version.ID) { + // Prefer a version that exactly matches the provided endpoint. 
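As a quick illustration of the BaseEndpoint helper vendored above, here is a minimal sketch (not part of the vendored code; the endpoint URL simply mirrors the test fixtures that follow) of stripping the version suffix from an identity endpoint:

    package main

    import (
        "fmt"

        "github.com/gophercloud/gophercloud/openstack/utils"
    )

    func main() {
        // The version component and everything after it are dropped, and any
        // query string or fragment is cleared.
        base, err := utils.BaseEndpoint("http://example.com/identity/v3")
        if err != nil {
            panic(err)
        }
        fmt.Println(base) // http://example.com/identity/
    }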
+ if href == identityEndpoint { + if href == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", value.ID, client.IdentityBase) + } + return version, href, nil + } + + // Otherwise, find the highest-priority version with a whitelisted status. + if goodStatus[strings.ToLower(value.Status)] { + if highest == nil || version.Priority > highest.Priority { + highest = version + endpoint = href + } + } + } + } + } + + if highest == nil { + return nil, "", fmt.Errorf("No supported version available from endpoint %s", client.IdentityBase) + } + if endpoint == "" { + return nil, "", fmt.Errorf("Endpoint missing in version %s response from %s", highest.ID, client.IdentityBase) + } + + return highest, endpoint, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/base_endpoint_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/base_endpoint_test.go new file mode 100644 index 000000000000..944536b8bfc0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/base_endpoint_test.go @@ -0,0 +1,88 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud/openstack/utils" + th "github.com/gophercloud/gophercloud/testhelper" +) + +type endpointTestCases struct { + Endpoint string + BaseEndpoint string +} + +func TestBaseEndpoint(t *testing.T) { + tests := []endpointTestCases{ + { + Endpoint: "http://example.com:5000/v3", + BaseEndpoint: "http://example.com:5000/", + }, + { + Endpoint: "http://example.com:5000/v3.6", + BaseEndpoint: "http://example.com:5000/", + }, + { + Endpoint: "http://example.com:5000/v2.0", + BaseEndpoint: "http://example.com:5000/", + }, + { + Endpoint: "http://example.com:5000/", + BaseEndpoint: "http://example.com:5000/", + }, + { + Endpoint: "http://example.com:5000", + BaseEndpoint: "http://example.com:5000", + }, + { + Endpoint: "http://example.com/identity/v3", + BaseEndpoint: "http://example.com/identity/", + }, + { + Endpoint: "http://example.com/identity/v3.6", + BaseEndpoint: "http://example.com/identity/", + }, + { + Endpoint: "http://example.com/identity/v2.0", + BaseEndpoint: "http://example.com/identity/", + }, + { + Endpoint: "http://example.com/identity/v2.0/projects", + BaseEndpoint: "http://example.com/identity/", + }, + { + Endpoint: "http://example.com/v2.0/projects", + BaseEndpoint: "http://example.com/", + }, + { + Endpoint: "http://example.com/identity/", + BaseEndpoint: "http://example.com/identity/", + }, + { + Endpoint: "http://dev.example.com:5000/v3", + BaseEndpoint: "http://dev.example.com:5000/", + }, + { + Endpoint: "http://dev.example.com:5000/v3.6", + BaseEndpoint: "http://dev.example.com:5000/", + }, + { + Endpoint: "http://dev.example.com/identity/", + BaseEndpoint: "http://dev.example.com/identity/", + }, + { + Endpoint: "http://dev.example.com/identity/v2.0/projects", + BaseEndpoint: "http://dev.example.com/identity/", + }, + { + Endpoint: "http://dev.example.com/identity/v3.6", + BaseEndpoint: "http://dev.example.com/identity/", + }, + } + + for _, test := range tests { + actual, err := utils.BaseEndpoint(test.Endpoint) + th.AssertNoErr(t, err) + th.AssertEquals(t, test.BaseEndpoint, actual) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/choose_version_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/choose_version_test.go new file mode 100644 index 000000000000..9c0119cb28c2 --- /dev/null +++ 
b/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/choose_version_test.go @@ -0,0 +1,119 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/openstack/utils" + "github.com/gophercloud/gophercloud/testhelper" +) + +func setupVersionHandler() { + testhelper.Mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + fmt.Fprintf(w, ` + { + "versions": { + "values": [ + { + "status": "stable", + "id": "v3.0", + "links": [ + { "href": "%s/v3.0", "rel": "self" } + ] + }, + { + "status": "stable", + "id": "v2.0", + "links": [ + { "href": "%s/v2.0", "rel": "self" } + ] + } + ] + } + } + `, testhelper.Server.URL, testhelper.Server.URL) + }) +} + +func TestChooseVersion(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + setupVersionHandler() + + v2 := &utils.Version{ID: "v2.0", Priority: 2, Suffix: "blarg"} + v3 := &utils.Version{ID: "v3.0", Priority: 3, Suffix: "hargl"} + + c := &gophercloud.ProviderClient{ + IdentityBase: testhelper.Endpoint(), + IdentityEndpoint: "", + } + v, endpoint, err := utils.ChooseVersion(c, []*utils.Version{v2, v3}) + + if err != nil { + t.Fatalf("Unexpected error from ChooseVersion: %v", err) + } + + if v != v3 { + t.Errorf("Expected %#v to win, but %#v did instead", v3, v) + } + + expected := testhelper.Endpoint() + "v3.0/" + if endpoint != expected { + t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint) + } +} + +func TestChooseVersionOpinionatedLink(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + setupVersionHandler() + + v2 := &utils.Version{ID: "v2.0", Priority: 2, Suffix: "nope"} + v3 := &utils.Version{ID: "v3.0", Priority: 3, Suffix: "northis"} + + c := &gophercloud.ProviderClient{ + IdentityBase: testhelper.Endpoint(), + IdentityEndpoint: testhelper.Endpoint() + "v2.0/", + } + v, endpoint, err := utils.ChooseVersion(c, []*utils.Version{v2, v3}) + if err != nil { + t.Fatalf("Unexpected error from ChooseVersion: %v", err) + } + + if v != v2 { + t.Errorf("Expected %#v to win, but %#v did instead", v2, v) + } + + expected := testhelper.Endpoint() + "v2.0/" + if endpoint != expected { + t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint) + } +} + +func TestChooseVersionFromSuffix(t *testing.T) { + testhelper.SetupHTTP() + defer testhelper.TeardownHTTP() + + v2 := &utils.Version{ID: "v2.0", Priority: 2, Suffix: "/v2.0/"} + v3 := &utils.Version{ID: "v3.0", Priority: 3, Suffix: "/v3.0/"} + + c := &gophercloud.ProviderClient{ + IdentityBase: testhelper.Endpoint(), + IdentityEndpoint: testhelper.Endpoint() + "v2.0/", + } + v, endpoint, err := utils.ChooseVersion(c, []*utils.Version{v2, v3}) + if err != nil { + t.Fatalf("Unexpected error from ChooseVersion: %v", err) + } + + if v != v2 { + t.Errorf("Expected %#v to win, but %#v did instead", v2, v) + } + + expected := testhelper.Endpoint() + "v2.0/" + if endpoint != expected { + t.Errorf("Expected endpoint [%s], but was [%s] instead", expected, endpoint) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/doc.go new file mode 100644 index 000000000000..66ecc0798226 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/utils/testing/doc.go @@ -0,0 +1,2 @@ +//utils +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/doc.go 
b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/doc.go new file mode 100644 index 000000000000..a71c82df8005 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/doc.go @@ -0,0 +1,67 @@ +/* +Package workflows provides interaction with the workflows API in the OpenStack Mistral service. + +Workflow represents a process that can be described in a various number of ways and that can do some job interesting to the end user. +Each workflow consists of tasks (at least one) describing what exact steps should be made during workflow execution. + +Example to list workflows + + listOpts := workflows.ListOpts{ + Namespace: "some-namespace", + } + + allPages, err := workflows.List(mistralClient, listOpts).AllPages() + if err != nil { + panic(err) + } + + allWorkflows, err := workflows.ExtractWorkflows(allPages) + if err != nil { + panic(err) + } + + for _, workflow := range allWorkflows { + fmt.Printf("%+v\n", workflow) + } + +Example to create a workflow + + workflowDefinition := `--- +version: '2.0' + +create_vm: +description: Simple workflow example +type: direct + +input: + - vm_name + - image_ref + - flavor_ref +output: + vm_id: <% $.vm_id %> + +tasks: + create_server: + action: nova.servers_create name=<% $.vm_name %> image=<% $.image_ref %> flavor=<% $.flavor_ref %> + publish: + vm_id: <% task(create_server).result.id %> + on-success: + - wait_for_instance + + wait_for_instance: + action: nova.servers_find id=<% $.vm_id %> status='ACTIVE' + retry: + delay: 5 + count: 15` + + createOpts := &workflows.CreateOpts{ + Definition: strings.NewReader(workflowDefinition), + Namespace: "some-namespace", + } + + execution, err := workflows.Create(fake.ServiceClient(), opts).Extract() + if err != nil { + panic(err) + } +*/ +package workflows diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/requests.go b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/requests.go new file mode 100644 index 000000000000..34bcabe2231c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/requests.go @@ -0,0 +1,56 @@ +package workflows + +import ( + "io" + + "github.com/gophercloud/gophercloud" +) + +// CreateOptsBuilder allows extension to add additional parameters to the Create request. +type CreateOptsBuilder interface { + ToWorkflowCreateParams() (io.Reader, string, error) +} + +// CreateOpts specifies parameters used to create a cron trigger. +type CreateOpts struct { + // Scope is the scope of the workflow. + // Allowed values are "private" and "public". + Scope string `q:"scope"` + + // Namespace will define the namespace of the workflow. + Namespace string `q:"namespace"` + + // Definition is the workflow definition written in Mistral Workflow Language v2. + Definition io.Reader +} + +// ToWorkflowCreateParams constructs a request query string from CreateOpts. +func (opts CreateOpts) ToWorkflowCreateParams() (io.Reader, string, error) { + q, err := gophercloud.BuildQueryString(opts) + return opts.Definition, q.String(), err +} + +// Create requests the creation of a new execution. 
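To make the split between query string and request body concrete, here is a hedged sketch (not part of the vendored code; the YAML fragment is illustrative) of what ToWorkflowCreateParams returns: the definition reader is passed through untouched, while the tagged fields become query parameters.

    package main

    import (
        "fmt"
        "io/ioutil"
        "strings"

        "github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows"
    )

    func main() {
        opts := workflows.CreateOpts{
            Namespace:  "some-namespace",
            Scope:      "private",
            Definition: strings.NewReader("---\nversion: '2.0'\n"),
        }

        body, query, err := opts.ToWorkflowCreateParams()
        if err != nil {
            panic(err)
        }

        def, _ := ioutil.ReadAll(body)
        fmt.Println(query)       // ?namespace=some-namespace&scope=private
        fmt.Println(string(def)) // the raw Mistral definition
    }

The Create function below then appends that query string to the workflows URL and posts the reader as the raw body with a text/plain content type.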
+func Create(client *gophercloud.ServiceClient, opts CreateOptsBuilder) (r CreateResult) { + url := createURL(client) + var b io.Reader + if opts != nil { + tmpB, query, err := opts.ToWorkflowCreateParams() + if err != nil { + r.Err = err + return + } + url += query + b = tmpB + } + + _, r.Err = client.Post(url, nil, &r.Body, &gophercloud.RequestOpts{ + RawBody: b, + MoreHeaders: map[string]string{ + "Content-Type": "text/plain", + "Accept": "", // Drop default JSON Accept header + }, + }) + + return +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/results.go b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/results.go new file mode 100644 index 000000000000..cccd59179420 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/results.go @@ -0,0 +1,82 @@ +package workflows + +import ( + "encoding/json" + "time" + + "github.com/gophercloud/gophercloud" +) + +// CreateResult is the response of a Post operations. Call its Extract method to interpret it as a list of Workflows. +type CreateResult struct { + gophercloud.Result +} + +// Extract helps to get created Workflow struct from a Create function. +func (r CreateResult) Extract() ([]Workflow, error) { + var s struct { + Workflows []Workflow `json:"workflows"` + } + err := r.ExtractInto(&s) + return s.Workflows, err +} + +// Workflow represents a workflow execution on OpenStack mistral API. +type Workflow struct { + // ID is the workflow's unique ID. + ID string `json:"id"` + + // Definition is the workflow definition in Mistral v2 DSL. + Definition string `json:"definition"` + + // Name is the name of the workflow. + Name string `json:"name"` + + // Namespace is the namespace of the workflow. + Namespace string `json:"namespace"` + + // Input represents the needed input to execute the workflow. + // This parameter is a list of each input, comma separated. + Input string `json:"input"` + + // ProjectID is the project id owner of the workflow. + ProjectID string `json:"project_id"` + + // Scope is the scope of the workflow. + // Values can be "private" or "public". + Scope string `json:"scope"` + + // Tags is a list of tags associated to the workflow. + Tags []string `json:"tags"` + + // CreatedAt is the creation date of the workflow. + CreatedAt time.Time `json:"-"` + + // UpdatedAt is the last update date of the workflow. 
+ UpdatedAt *time.Time `json:"-"` +} + +// UnmarshalJSON implements unmarshalling custom types +func (r *Workflow) UnmarshalJSON(b []byte) error { + type tmp Workflow + var s struct { + tmp + CreatedAt gophercloud.JSONRFC3339ZNoTNoZ `json:"created_at"` + UpdatedAt *gophercloud.JSONRFC3339ZNoTNoZ `json:"updated_at"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = Workflow(s.tmp) + + r.CreatedAt = time.Time(s.CreatedAt) + if s.UpdatedAt != nil { + t := time.Time(*s.UpdatedAt) + r.UpdatedAt = &t + } + + return nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/testing/requests_test.go b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/testing/requests_test.go new file mode 100644 index 000000000000..e521073ee76b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/testing/requests_test.go @@ -0,0 +1,90 @@ +package testing + +import ( + "fmt" + "net/http" + "reflect" + "strings" + "testing" + "time" + + "github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows" + th "github.com/gophercloud/gophercloud/testhelper" + fake "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestCreateWorkflow(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + + definition := `--- +version: '2.0' + +simple_echo: + description: Simple workflow example + type: direct + + tasks: + test: + action: std.echo output="Hello World!"` + + th.Mux.HandleFunc("/workflows", func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, "POST") + th.TestHeader(t, r, "X-Auth-Token", fake.TokenID) + th.TestHeader(t, r, "Content-Type", "text/plain") + th.TestFormValues(t, r, map[string]string{ + "namespace": "some-namespace", + "scope": "private", + }) + th.TestBody(t, r, definition) + + w.WriteHeader(http.StatusCreated) + w.Header().Add("Content-Type", "application/json") + + fmt.Fprintf(w, `{ + "workflows": [ + { + "created_at": "1970-01-01 00:00:00", + "definition": "Workflow Definition in Mistral DSL v2", + "id": "1", + "input": "param1, param2", + "name": "flow", + "namespace": "some-namespace", + "project_id": "p1", + "scope": "private", + "updated_at": "1970-01-01 00:00:00" + } + ] + }`) + }) + + opts := &workflows.CreateOpts{ + Namespace: "some-namespace", + Scope: "private", + Definition: strings.NewReader(definition), + } + + actual, err := workflows.Create(fake.ServiceClient(), opts).Extract() + if err != nil { + t.Fatalf("Unable to create workflow: %v", err) + } + + updated := time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC) + expected := []workflows.Workflow{ + workflows.Workflow{ + ID: "1", + Definition: "Workflow Definition in Mistral DSL v2", + Name: "flow", + Namespace: "some-namespace", + Input: "param1, param2", + ProjectID: "p1", + Scope: "private", + CreatedAt: time.Date(1970, time.January, 1, 0, 0, 0, 0, time.UTC), + UpdatedAt: &updated, + }, + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Expected %#v, but was %#v", expected, actual) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/urls.go b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/urls.go new file mode 100644 index 000000000000..aca6dfe79295 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/openstack/workflow/v2/workflows/urls.go @@ -0,0 +1,9 @@ +package workflows + +import ( + "github.com/gophercloud/gophercloud" +) + +func createURL(client *gophercloud.ServiceClient) 
string { + return client.ServiceURL("workflows") +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/http.go b/vendor/github.com/gophercloud/gophercloud/pagination/http.go new file mode 100644 index 000000000000..757295c423ad --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/http.go @@ -0,0 +1,60 @@ +package pagination + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/gophercloud/gophercloud" +) + +// PageResult stores the HTTP response that returned the current page of results. +type PageResult struct { + gophercloud.Result + url.URL +} + +// PageResultFrom parses an HTTP response as JSON and returns a PageResult containing the +// results, interpreting it as JSON if the content type indicates. +func PageResultFrom(resp *http.Response) (PageResult, error) { + var parsedBody interface{} + + defer resp.Body.Close() + rawBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PageResult{}, err + } + + if strings.HasPrefix(resp.Header.Get("Content-Type"), "application/json") { + err = json.Unmarshal(rawBody, &parsedBody) + if err != nil { + return PageResult{}, err + } + } else { + parsedBody = rawBody + } + + return PageResultFromParsed(resp, parsedBody), err +} + +// PageResultFromParsed constructs a PageResult from an HTTP response that has already had its +// body parsed as JSON (and closed). +func PageResultFromParsed(resp *http.Response, body interface{}) PageResult { + return PageResult{ + Result: gophercloud.Result{ + Body: body, + Header: resp.Header, + }, + URL: *resp.Request.URL, + } +} + +// Request performs an HTTP request and extracts the http.Response from the result. +func Request(client *gophercloud.ServiceClient, headers map[string]string, url string) (*http.Response, error) { + return client.Get(url, nil, &gophercloud.RequestOpts{ + MoreHeaders: headers, + OkCodes: []int{200, 204, 300}, + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/linked.go b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go new file mode 100644 index 000000000000..3656fb7f8f47 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/linked.go @@ -0,0 +1,92 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// LinkedPageBase may be embedded to implement a page that provides navigational "Next" and "Previous" links within its result. +type LinkedPageBase struct { + PageResult + + // LinkPath lists the keys that should be traversed within a response to arrive at the "next" pointer. + // If any link along the path is missing, an empty URL will be returned. + // If any link results in an unexpected value type, an error will be returned. + // When left as "nil", []string{"links", "next"} will be used as a default. + LinkPath []string +} + +// NextPageURL extracts the pagination structure from a JSON response and returns the "next" link, if one is present. +// It assumes that the links are available in a "links" element of the top-level response object. +// If this is not the case, override NextPageURL on your result type. 
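Before the NextPageURL implementation, a hypothetical sketch of how a resource package might use LinkPath when its pagination links do not live under the default "links"/"next" keys. The WidgetPage type, the "widgets"/"widget_links" keys, and the response shape are all invented for illustration:

    package widgets

    import "github.com/gophercloud/gophercloud/pagination"

    // WidgetPage wraps a response shaped like
    //   { "widgets": [...], "widget_links": { "next": "..." } }
    type WidgetPage struct {
        pagination.LinkedPageBase
    }

    // IsEmpty is supplied by the embedding type because the base
    // implementation only understands plain JSON arrays.
    func (p WidgetPage) IsEmpty() (bool, error) {
        var s struct {
            Widgets []interface{} `json:"widgets"`
        }
        err := p.ExtractInto(&s)
        return len(s.Widgets) == 0, err
    }

    func newWidgetPage(r pagination.PageResult) pagination.Page {
        return WidgetPage{pagination.LinkedPageBase{
            PageResult: r,
            // Traverse body["widget_links"]["next"] instead of the
            // default body["links"]["next"].
            LinkPath: []string{"widget_links", "next"},
        }}
    }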
+func (current LinkedPageBase) NextPageURL() (string, error) { + var path []string + var key string + + if current.LinkPath == nil { + path = []string{"links", "next"} + } else { + path = current.LinkPath + } + + submap, ok := current.Body.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return "", err + } + + for { + key, path = path[0], path[1:len(path)] + + value, ok := submap[key] + if !ok { + return "", nil + } + + if len(path) > 0 { + submap, ok = value.(map[string]interface{}) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + } else { + if value == nil { + // Actual null element. + return "", nil + } + + url, ok := value.(string) + if !ok { + err := gophercloud.ErrUnexpectedType{} + err.Expected = "string" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(value)) + return "", err + } + + return url, nil + } + } +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current LinkedPageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. +func (current LinkedPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/marker.go b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go new file mode 100644 index 000000000000..52e53bae8503 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/marker.go @@ -0,0 +1,58 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// MarkerPage is a stricter Page interface that describes additional functionality required for use with NewMarkerPager. +// For convenience, embed the MarkedPageBase struct. +type MarkerPage interface { + Page + + // LastMarker returns the last "marker" value on this page. + LastMarker() (string, error) +} + +// MarkerPageBase is a page in a collection that's paginated by "limit" and "marker" query parameters. +type MarkerPageBase struct { + PageResult + + // Owner is a reference to the embedding struct. + Owner MarkerPage +} + +// NextPageURL generates the URL for the page of results after this one. +func (current MarkerPageBase) NextPageURL() (string, error) { + currentURL := current.URL + + mark, err := current.Owner.LastMarker() + if err != nil { + return "", err + } + + q := currentURL.Query() + q.Set("marker", mark) + currentURL.RawQuery = q.Encode() + + return currentURL.String(), nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current MarkerPageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the linked page's body. This method is needed to satisfy the +// Page interface. 
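The Owner field above exists because the embedded MarkerPageBase cannot reach methods defined on the outer page type. A sketch of the wiring (the namePage type, the newline-separated body format, and the helper are illustrative, mirroring the marker paging tests further down):

    package names

    import (
        "strings"

        "github.com/gophercloud/gophercloud/pagination"
    )

    type namePage struct {
        pagination.MarkerPageBase
    }

    func (p namePage) IsEmpty() (bool, error) {
        items, err := extractNames(p)
        return len(items) == 0, err
    }

    // LastMarker supplies the "marker" query parameter for the next request.
    func (p namePage) LastMarker() (string, error) {
        items, err := extractNames(p)
        if err != nil || len(items) == 0 {
            return "", err
        }
        return items[len(items)-1], nil
    }

    func extractNames(page pagination.Page) ([]string, error) {
        raw := page.(namePage).Body.([]byte)
        return strings.Fields(string(raw)), nil
    }

    func newNamePage(r pagination.PageResult) pagination.Page {
        p := namePage{pagination.MarkerPageBase{PageResult: r}}
        // Without this assignment, NextPageURL could not call LastMarker.
        p.Owner = p
        return p
    }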
+func (current MarkerPageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pager.go b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go new file mode 100644 index 000000000000..42c0b2dbe5b9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pager.go @@ -0,0 +1,251 @@ +package pagination + +import ( + "errors" + "fmt" + "net/http" + "reflect" + "strings" + + "github.com/gophercloud/gophercloud" +) + +var ( + // ErrPageNotAvailable is returned from a Pager when a next or previous page is requested, but does not exist. + ErrPageNotAvailable = errors.New("The requested page does not exist.") +) + +// Page must be satisfied by the result type of any resource collection. +// It allows clients to interact with the resource uniformly, regardless of whether or not or how it's paginated. +// Generally, rather than implementing this interface directly, implementors should embed one of the concrete PageBase structs, +// instead. +// Depending on the pagination strategy of a particular resource, there may be an additional subinterface that the result type +// will need to implement. +type Page interface { + // NextPageURL generates the URL for the page of data that follows this collection. + // Return "" if no such page exists. + NextPageURL() (string, error) + + // IsEmpty returns true if this Page has no items in it. + IsEmpty() (bool, error) + + // GetBody returns the Page Body. This is used in the `AllPages` method. + GetBody() interface{} +} + +// Pager knows how to advance through a specific resource collection, one page at a time. +type Pager struct { + client *gophercloud.ServiceClient + + initialURL string + + createPage func(r PageResult) Page + + firstPage Page + + Err error + + // Headers supplies additional HTTP headers to populate on each paged request. + Headers map[string]string +} + +// NewPager constructs a manually-configured pager. +// Supply the URL for the first page, a function that requests a specific page given a URL, and a function that counts a page. +func NewPager(client *gophercloud.ServiceClient, initialURL string, createPage func(r PageResult) Page) Pager { + return Pager{ + client: client, + initialURL: initialURL, + createPage: createPage, + } +} + +// WithPageCreator returns a new Pager that substitutes a different page creation function. This is +// useful for overriding List functions in delegation. +func (p Pager) WithPageCreator(createPage func(r PageResult) Page) Pager { + return Pager{ + client: p.client, + initialURL: p.initialURL, + createPage: createPage, + } +} + +func (p Pager) fetchNextPage(url string) (Page, error) { + resp, err := Request(p.client, p.Headers, url) + if err != nil { + return nil, err + } + + remembered, err := PageResultFrom(resp) + if err != nil { + return nil, err + } + + return p.createPage(remembered), nil +} + +// EachPage iterates over each page returned by a Pager, yielding one at a time to a handler function. +// Return "false" from the handler to prematurely stop iterating. 
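A short sketch of driving a Pager directly, including the Headers field described above, which the tests below do not exercise. The package name, URL, header, and page constructor are placeholders:

    package example

    import (
        "github.com/gophercloud/gophercloud"
        "github.com/gophercloud/gophercloud/pagination"
    )

    func listAll(client *gophercloud.ServiceClient, url string,
        createPage func(pagination.PageResult) pagination.Page) error {
        pager := pagination.NewPager(client, url, createPage)
        // Headers set here are added to every paged request.
        pager.Headers = map[string]string{"X-Request-Id": "example"}

        return pager.EachPage(func(page pagination.Page) (bool, error) {
            // Inspect the page here; return false to stop before any
            // remaining pages are fetched.
            return true, nil
        })
    }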
+func (p Pager) EachPage(handler func(Page) (bool, error)) error { + if p.Err != nil { + return p.Err + } + currentURL := p.initialURL + for { + var currentPage Page + + // if first page has already been fetched, no need to fetch it again + if p.firstPage != nil { + currentPage = p.firstPage + p.firstPage = nil + } else { + var err error + currentPage, err = p.fetchNextPage(currentURL) + if err != nil { + return err + } + } + + empty, err := currentPage.IsEmpty() + if err != nil { + return err + } + if empty { + return nil + } + + ok, err := handler(currentPage) + if err != nil { + return err + } + if !ok { + return nil + } + + currentURL, err = currentPage.NextPageURL() + if err != nil { + return err + } + if currentURL == "" { + return nil + } + } +} + +// AllPages returns all the pages from a `List` operation in a single page, +// allowing the user to retrieve all the pages at once. +func (p Pager) AllPages() (Page, error) { + // pagesSlice holds all the pages until they get converted into as Page Body. + var pagesSlice []interface{} + // body will contain the final concatenated Page body. + var body reflect.Value + + // Grab a first page to ascertain the page body type. + firstPage, err := p.fetchNextPage(p.initialURL) + if err != nil { + return nil, err + } + // Store the page type so we can use reflection to create a new mega-page of + // that type. + pageType := reflect.TypeOf(firstPage) + + // if it's a single page, just return the firstPage (first page) + if _, found := pageType.FieldByName("SinglePageBase"); found { + return firstPage, nil + } + + // store the first page to avoid getting it twice + p.firstPage = firstPage + + // Switch on the page body type. Recognized types are `map[string]interface{}`, + // `[]byte`, and `[]interface{}`. + switch pb := firstPage.GetBody().(type) { + case map[string]interface{}: + // key is the map key for the page body if the body type is `map[string]interface{}`. + var key string + // Iterate over the pages to concatenate the bodies. + err = p.EachPage(func(page Page) (bool, error) { + b := page.GetBody().(map[string]interface{}) + for k, v := range b { + // If it's a linked page, we don't want the `links`, we want the other one. + if !strings.HasSuffix(k, "links") { + // check the field's type. we only want []interface{} (which is really []map[string]interface{}) + switch vt := v.(type) { + case []interface{}: + key = k + pagesSlice = append(pagesSlice, vt...) + } + } + } + return true, nil + }) + if err != nil { + return nil, err + } + // Set body to value of type `map[string]interface{}` + body = reflect.MakeMap(reflect.MapOf(reflect.TypeOf(key), reflect.TypeOf(pagesSlice))) + body.SetMapIndex(reflect.ValueOf(key), reflect.ValueOf(pagesSlice)) + case []byte: + // Iterate over the pages to concatenate the bodies. + err = p.EachPage(func(page Page) (bool, error) { + b := page.GetBody().([]byte) + pagesSlice = append(pagesSlice, b) + // seperate pages with a comma + pagesSlice = append(pagesSlice, []byte{10}) + return true, nil + }) + if err != nil { + return nil, err + } + if len(pagesSlice) > 0 { + // Remove the trailing comma. + pagesSlice = pagesSlice[:len(pagesSlice)-1] + } + var b []byte + // Combine the slice of slices in to a single slice. + for _, slice := range pagesSlice { + b = append(b, slice.([]byte)...) + } + // Set body to value of type `bytes`. + body = reflect.New(reflect.TypeOf(b)).Elem() + body.SetBytes(b) + case []interface{}: + // Iterate over the pages to concatenate the bodies. 
+ err = p.EachPage(func(page Page) (bool, error) { + b := page.GetBody().([]interface{}) + pagesSlice = append(pagesSlice, b...) + return true, nil + }) + if err != nil { + return nil, err + } + // Set body to value of type `[]interface{}` + body = reflect.MakeSlice(reflect.TypeOf(pagesSlice), len(pagesSlice), len(pagesSlice)) + for i, s := range pagesSlice { + body.Index(i).Set(reflect.ValueOf(s)) + } + default: + err := gophercloud.ErrUnexpectedType{} + err.Expected = "map[string]interface{}/[]byte/[]interface{}" + err.Actual = fmt.Sprintf("%T", pb) + return nil, err + } + + // Each `Extract*` function is expecting a specific type of page coming back, + // otherwise the type assertion in those functions will fail. pageType is needed + // to create a type in this method that has the same type that the `Extract*` + // function is expecting and set the Body of that object to the concatenated + // pages. + page := reflect.New(pageType) + // Set the page body to be the concatenated pages. + page.Elem().FieldByName("Body").Set(body) + // Set any additional headers that were pass along. The `objectstorage` pacakge, + // for example, passes a Content-Type header. + h := make(http.Header) + for k, v := range p.Headers { + h.Add(k, v) + } + page.Elem().FieldByName("Header").Set(reflect.ValueOf(h)) + // Type assert the page to a Page interface so that the type assertion in the + // `Extract*` methods will work. + return page.Elem().Interface().(Page), err +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go new file mode 100644 index 000000000000..912daea36426 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/pkg.go @@ -0,0 +1,4 @@ +/* +Package pagination contains utilities and convenience structs that implement common pagination idioms within OpenStack APIs. +*/ +package pagination diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/single.go b/vendor/github.com/gophercloud/gophercloud/pagination/single.go new file mode 100644 index 000000000000..4251d6491efe --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/single.go @@ -0,0 +1,33 @@ +package pagination + +import ( + "fmt" + "reflect" + + "github.com/gophercloud/gophercloud" +) + +// SinglePageBase may be embedded in a Page that contains all of the results from an operation at once. +type SinglePageBase PageResult + +// NextPageURL always returns "" to indicate that there are no more pages to return. +func (current SinglePageBase) NextPageURL() (string, error) { + return "", nil +} + +// IsEmpty satisifies the IsEmpty method of the Page interface +func (current SinglePageBase) IsEmpty() (bool, error) { + if b, ok := current.Body.([]interface{}); ok { + return len(b) == 0, nil + } + err := gophercloud.ErrUnexpectedType{} + err.Expected = "[]interface{}" + err.Actual = fmt.Sprintf("%v", reflect.TypeOf(current.Body)) + return true, err +} + +// GetBody returns the single page's body. This method is needed to satisfy the +// Page interface. 
+func (current SinglePageBase) GetBody() interface{} { + return current.Body +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/pagination/testing/doc.go new file mode 100644 index 000000000000..0bc1eb3807c3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/testing/doc.go @@ -0,0 +1,2 @@ +// pagination +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/testing/linked_test.go b/vendor/github.com/gophercloud/gophercloud/pagination/testing/linked_test.go new file mode 100644 index 000000000000..3533e445a36b --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/testing/linked_test.go @@ -0,0 +1,112 @@ +package testing + +import ( + "fmt" + "net/http" + "reflect" + "testing" + + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" +) + +// LinkedPager sample and test cases. + +type LinkedPageResult struct { + pagination.LinkedPageBase +} + +func (r LinkedPageResult) IsEmpty() (bool, error) { + is, err := ExtractLinkedInts(r) + return len(is) == 0, err +} + +func ExtractLinkedInts(r pagination.Page) ([]int, error) { + var s struct { + Ints []int `json:"ints"` + } + err := (r.(LinkedPageResult)).ExtractInto(&s) + return s.Ints, err +} + +func createLinked(t *testing.T) pagination.Pager { + testhelper.SetupHTTP() + + testhelper.Mux.HandleFunc("/page1", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [1, 2, 3], "links": { "next": "%s/page2" } }`, testhelper.Server.URL) + }) + + testhelper.Mux.HandleFunc("/page2", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [4, 5, 6], "links": { "next": "%s/page3" } }`, testhelper.Server.URL) + }) + + testhelper.Mux.HandleFunc("/page3", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [7, 8, 9], "links": { "next": null } }`) + }) + + client := createClient() + + createPage := func(r pagination.PageResult) pagination.Page { + return LinkedPageResult{pagination.LinkedPageBase{PageResult: r}} + } + + return pagination.NewPager(client, testhelper.Server.URL+"/page1", createPage) +} + +func TestEnumerateLinked(t *testing.T) { + pager := createLinked(t) + defer testhelper.TeardownHTTP() + + callCount := 0 + err := pager.EachPage(func(page pagination.Page) (bool, error) { + actual, err := ExtractLinkedInts(page) + if err != nil { + return false, err + } + + t.Logf("Handler invoked with %v", actual) + + var expected []int + switch callCount { + case 0: + expected = []int{1, 2, 3} + case 1: + expected = []int{4, 5, 6} + case 2: + expected = []int{7, 8, 9} + default: + t.Fatalf("Unexpected call count: %d", callCount) + return false, nil + } + + if !reflect.DeepEqual(expected, actual) { + t.Errorf("Call %d: Expected %#v, but was %#v", callCount, expected, actual) + } + + callCount++ + return true, nil + }) + if err != nil { + t.Errorf("Unexpected error for page iteration: %v", err) + } + + if callCount != 3 { + t.Errorf("Expected 3 calls, but was %d", callCount) + } +} + +func TestAllPagesLinked(t *testing.T) { + pager := createLinked(t) + defer testhelper.TeardownHTTP() + + page, err := pager.AllPages() + testhelper.AssertNoErr(t, err) + + expected := []int{1, 2, 3, 4, 5, 6, 7, 8, 9} + actual, err := ExtractLinkedInts(page) + 
testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/testing/marker_test.go b/vendor/github.com/gophercloud/gophercloud/pagination/testing/marker_test.go new file mode 100644 index 000000000000..7b1a6daf4ae6 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/testing/marker_test.go @@ -0,0 +1,127 @@ +package testing + +import ( + "fmt" + "net/http" + "strings" + "testing" + + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" +) + +// MarkerPager sample and test cases. + +type MarkerPageResult struct { + pagination.MarkerPageBase +} + +func (r MarkerPageResult) IsEmpty() (bool, error) { + results, err := ExtractMarkerStrings(r) + if err != nil { + return true, err + } + return len(results) == 0, err +} + +func (r MarkerPageResult) LastMarker() (string, error) { + results, err := ExtractMarkerStrings(r) + if err != nil { + return "", err + } + if len(results) == 0 { + return "", nil + } + return results[len(results)-1], nil +} + +func createMarkerPaged(t *testing.T) pagination.Pager { + testhelper.SetupHTTP() + + testhelper.Mux.HandleFunc("/page", func(w http.ResponseWriter, r *http.Request) { + r.ParseForm() + ms := r.Form["marker"] + switch { + case len(ms) == 0: + fmt.Fprintf(w, "aaa\nbbb\nccc") + case len(ms) == 1 && ms[0] == "ccc": + fmt.Fprintf(w, "ddd\neee\nfff") + case len(ms) == 1 && ms[0] == "fff": + fmt.Fprintf(w, "ggg\nhhh\niii") + case len(ms) == 1 && ms[0] == "iii": + w.WriteHeader(http.StatusNoContent) + default: + t.Errorf("Request with unexpected marker: [%v]", ms) + } + }) + + client := createClient() + + createPage := func(r pagination.PageResult) pagination.Page { + p := MarkerPageResult{pagination.MarkerPageBase{PageResult: r}} + p.MarkerPageBase.Owner = p + return p + } + + return pagination.NewPager(client, testhelper.Server.URL+"/page", createPage) +} + +func ExtractMarkerStrings(page pagination.Page) ([]string, error) { + content := page.(MarkerPageResult).Body.([]uint8) + parts := strings.Split(string(content), "\n") + results := make([]string, 0, len(parts)) + for _, part := range parts { + if len(part) > 0 { + results = append(results, part) + } + } + return results, nil +} + +func TestEnumerateMarker(t *testing.T) { + pager := createMarkerPaged(t) + defer testhelper.TeardownHTTP() + + callCount := 0 + err := pager.EachPage(func(page pagination.Page) (bool, error) { + actual, err := ExtractMarkerStrings(page) + if err != nil { + return false, err + } + + t.Logf("Handler invoked with %v", actual) + + var expected []string + switch callCount { + case 0: + expected = []string{"aaa", "bbb", "ccc"} + case 1: + expected = []string{"ddd", "eee", "fff"} + case 2: + expected = []string{"ggg", "hhh", "iii"} + default: + t.Fatalf("Unexpected call count: %d", callCount) + return false, nil + } + + testhelper.CheckDeepEquals(t, expected, actual) + + callCount++ + return true, nil + }) + testhelper.AssertNoErr(t, err) + testhelper.AssertEquals(t, callCount, 3) +} + +func TestAllPagesMarker(t *testing.T) { + pager := createMarkerPaged(t) + defer testhelper.TeardownHTTP() + + page, err := pager.AllPages() + testhelper.AssertNoErr(t, err) + + expected := []string{"aaa", "bbb", "ccc", "ddd", "eee", "fff", "ggg", "hhh", "iii"} + actual, err := ExtractMarkerStrings(page) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) +} diff --git 
a/vendor/github.com/gophercloud/gophercloud/pagination/testing/pagination_test.go b/vendor/github.com/gophercloud/gophercloud/pagination/testing/pagination_test.go new file mode 100644 index 000000000000..170dca45ca39 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/testing/pagination_test.go @@ -0,0 +1,13 @@ +package testing + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/testhelper" +) + +func createClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{TokenID: "abc123"}, + Endpoint: testhelper.Endpoint(), + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/pagination/testing/single_test.go b/vendor/github.com/gophercloud/gophercloud/pagination/testing/single_test.go new file mode 100644 index 000000000000..8d95e948bfb9 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/pagination/testing/single_test.go @@ -0,0 +1,79 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud/pagination" + "github.com/gophercloud/gophercloud/testhelper" +) + +// SinglePage sample and test cases. + +type SinglePageResult struct { + pagination.SinglePageBase +} + +func (r SinglePageResult) IsEmpty() (bool, error) { + is, err := ExtractSingleInts(r) + if err != nil { + return true, err + } + return len(is) == 0, nil +} + +func ExtractSingleInts(r pagination.Page) ([]int, error) { + var s struct { + Ints []int `json:"ints"` + } + err := (r.(SinglePageResult)).ExtractInto(&s) + return s.Ints, err +} + +func setupSinglePaged() pagination.Pager { + testhelper.SetupHTTP() + client := createClient() + + testhelper.Mux.HandleFunc("/only", func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{ "ints": [1, 2, 3] }`) + }) + + createPage := func(r pagination.PageResult) pagination.Page { + return SinglePageResult{pagination.SinglePageBase(r)} + } + + return pagination.NewPager(client, testhelper.Server.URL+"/only", createPage) +} + +func TestEnumerateSinglePaged(t *testing.T) { + callCount := 0 + pager := setupSinglePaged() + defer testhelper.TeardownHTTP() + + err := pager.EachPage(func(page pagination.Page) (bool, error) { + callCount++ + + expected := []int{1, 2, 3} + actual, err := ExtractSingleInts(page) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) + return true, nil + }) + testhelper.CheckNoErr(t, err) + testhelper.CheckEquals(t, 1, callCount) +} + +func TestAllPagesSingle(t *testing.T) { + pager := setupSinglePaged() + defer testhelper.TeardownHTTP() + + page, err := pager.AllPages() + testhelper.AssertNoErr(t, err) + + expected := []int{1, 2, 3} + actual, err := ExtractSingleInts(page) + testhelper.AssertNoErr(t, err) + testhelper.CheckDeepEquals(t, expected, actual) +} diff --git a/vendor/github.com/gophercloud/gophercloud/params.go b/vendor/github.com/gophercloud/gophercloud/params.go new file mode 100644 index 000000000000..19b8cf7bf847 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/params.go @@ -0,0 +1,475 @@ +package gophercloud + +import ( + "encoding/json" + "fmt" + "net/url" + "reflect" + "strconv" + "strings" + "time" +) + +/* +BuildRequestBody builds a map[string]interface from the given `struct`. If +parent is not an empty string, the final map[string]interface returned will +encapsulate the built one. 
For example: + + disk := 1 + createOpts := flavors.CreateOpts{ + ID: "1", + Name: "m1.tiny", + Disk: &disk, + RAM: 512, + VCPUs: 1, + RxTxFactor: 1.0, + } + + body, err := gophercloud.BuildRequestBody(createOpts, "flavor") + +The above example can be run as-is, however it is recommended to look at how +BuildRequestBody is used within Gophercloud to more fully understand how it +fits within the request process as a whole rather than use it directly as shown +above. +*/ +func BuildRequestBody(opts interface{}, parent string) (map[string]interface{}, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]interface{}) + if optsValue.Kind() == reflect.Struct { + //fmt.Printf("optsValue.Kind() is a reflect.Struct: %+v\n", optsValue.Kind()) + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + + if f.Name != strings.Title(f.Name) { + //fmt.Printf("Skipping field: %s...\n", f.Name) + continue + } + + //fmt.Printf("Starting on field: %s...\n", f.Name) + + zero := isZero(v) + //fmt.Printf("v is zero?: %v\n", zero) + + // if the field has a required tag that's set to "true" + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + //fmt.Printf("Checking required field [%s]:\n\tv: %+v\n\tisZero:%v\n", f.Name, v.Interface(), zero) + // if the field's value is zero, return a missing-argument error + if zero { + // if the field has a 'required' tag, it can't have a zero-value + err := ErrMissingInput{} + err.Argument = f.Name + return nil, err + } + } + + if xorTag := f.Tag.Get("xor"); xorTag != "" { + //fmt.Printf("Checking `xor` tag for field [%s] with value %+v:\n\txorTag: %s\n", f.Name, v, xorTag) + xorField := optsValue.FieldByName(xorTag) + var xorFieldIsZero bool + if reflect.ValueOf(xorField.Interface()) == reflect.Zero(xorField.Type()) { + xorFieldIsZero = true + } else { + if xorField.Kind() == reflect.Ptr { + xorField = xorField.Elem() + } + xorFieldIsZero = isZero(xorField) + } + if !(zero != xorFieldIsZero) { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, xorTag) + err.Info = fmt.Sprintf("Exactly one of %s and %s must be provided", f.Name, xorTag) + return nil, err + } + } + + if orTag := f.Tag.Get("or"); orTag != "" { + //fmt.Printf("Checking `or` tag for field with:\n\tname: %+v\n\torTag:%s\n", f.Name, orTag) + //fmt.Printf("field is zero?: %v\n", zero) + if zero { + orField := optsValue.FieldByName(orTag) + var orFieldIsZero bool + if reflect.ValueOf(orField.Interface()) == reflect.Zero(orField.Type()) { + orFieldIsZero = true + } else { + if orField.Kind() == reflect.Ptr { + orField = orField.Elem() + } + orFieldIsZero = isZero(orField) + } + if orFieldIsZero { + err := ErrMissingInput{} + err.Argument = fmt.Sprintf("%s/%s", f.Name, orTag) + err.Info = fmt.Sprintf("At least one of %s and %s must be provided", f.Name, orTag) + return nil, err + } + } + } + + jsonTag := f.Tag.Get("json") + if jsonTag == "-" { + continue + } + + if v.Kind() == reflect.Struct || (v.Kind() == reflect.Ptr && v.Elem().Kind() == reflect.Struct) { + if zero { + //fmt.Printf("value before change: %+v\n", optsValue.Field(i)) + if jsonTag != "" { + jsonTagPieces := strings.Split(jsonTag, ",") + if len(jsonTagPieces) > 1 && jsonTagPieces[1] == "omitempty" { + if v.CanSet() { + if !v.IsNil() { + if v.Kind() == reflect.Ptr { + 
v.Set(reflect.Zero(v.Type())) + } + } + //fmt.Printf("value after change: %+v\n", optsValue.Field(i)) + } + } + } + continue + } + + //fmt.Printf("Calling BuildRequestBody with:\n\tv: %+v\n\tf.Name:%s\n", v.Interface(), f.Name) + _, err := BuildRequestBody(v.Interface(), f.Name) + if err != nil { + return nil, err + } + } + } + + //fmt.Printf("opts: %+v \n", opts) + + b, err := json.Marshal(opts) + if err != nil { + return nil, err + } + + //fmt.Printf("string(b): %s\n", string(b)) + + err = json.Unmarshal(b, &optsMap) + if err != nil { + return nil, err + } + + //fmt.Printf("optsMap: %+v\n", optsMap) + + if parent != "" { + optsMap = map[string]interface{}{parent: optsMap} + } + //fmt.Printf("optsMap after parent added: %+v\n", optsMap) + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +// EnabledState is a convenience type, mostly used in Create and Update +// operations. Because the zero value of a bool is FALSE, we need to use a +// pointer instead to indicate zero-ness. +type EnabledState *bool + +// Convenience vars for EnabledState values. +var ( + iTrue = true + iFalse = false + + Enabled EnabledState = &iTrue + Disabled EnabledState = &iFalse +) + +// IPVersion is a type for the possible IP address versions. Valid instances +// are IPv4 and IPv6 +type IPVersion int + +const ( + // IPv4 is used for IP version 4 addresses + IPv4 IPVersion = 4 + // IPv6 is used for IP version 6 addresses + IPv6 IPVersion = 6 +) + +// IntToPointer is a function for converting integers into integer pointers. +// This is useful when passing in options to operations. +func IntToPointer(i int) *int { + return &i +} + +/* +MaybeString is an internal function to be used by request methods in individual +resource packages. + +It takes a string that might be a zero value and returns either a pointer to its +address or nil. This is useful for allowing users to conveniently omit values +from an options struct by leaving them zeroed, but still pass nil to the JSON +serializer so they'll be omitted from the request body. +*/ +func MaybeString(original string) *string { + if original != "" { + return &original + } + return nil +} + +/* +MaybeInt is an internal function to be used by request methods in individual +resource packages. + +Like MaybeString, it accepts an int that may or may not be a zero value, and +returns either a pointer to its address or nil. It's intended to hint that the +JSON serializer should omit its field. 
+*/ +func MaybeInt(original int) *int { + if original != 0 { + return &original + } + return nil +} + +/* +func isUnderlyingStructZero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Ptr: + return isUnderlyingStructZero(v.Elem()) + default: + return isZero(v) + } +} +*/ + +var t time.Time + +func isZero(v reflect.Value) bool { + //fmt.Printf("\n\nchecking isZero for value: %+v\n", v) + switch v.Kind() { + case reflect.Ptr: + if v.IsNil() { + return true + } + return false + case reflect.Func, reflect.Map, reflect.Slice: + return v.IsNil() + case reflect.Array: + z := true + for i := 0; i < v.Len(); i++ { + z = z && isZero(v.Index(i)) + } + return z + case reflect.Struct: + if v.Type() == reflect.TypeOf(t) { + if v.Interface().(time.Time).IsZero() { + return true + } + return false + } + z := true + for i := 0; i < v.NumField(); i++ { + z = z && isZero(v.Field(i)) + } + return z + } + // Compare other types directly: + z := reflect.Zero(v.Type()) + //fmt.Printf("zero type for value: %+v\n\n\n", z) + return v.Interface() == z.Interface() +} + +/* +BuildQueryString is an internal function to be used by request methods in +individual resource packages. + +It accepts a tagged structure and expands it into a URL struct. Field names are +converted into query parameters based on a "q" tag. For example: + + type struct Something { + Bar string `q:"x_bar"` + Baz int `q:"lorem_ipsum"` + } + + instance := Something{ + Bar: "AAA", + Baz: "BBB", + } + +will be converted into "?x_bar=AAA&lorem_ipsum=BBB". + +The struct's fields may be strings, integers, or boolean values. Fields left at +their type's zero value will be omitted from the query. +*/ +func BuildQueryString(opts interface{}) (*url.URL, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + params := url.Values{} + + if optsValue.Kind() == reflect.Struct { + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + qTag := f.Tag.Get("q") + + // if the field has a 'q' tag, it goes in the query string + if qTag != "" { + tags := strings.Split(qTag, ",") + + // if the field is set, add it to the slice of query pieces + if !isZero(v) { + loop: + switch v.Kind() { + case reflect.Ptr: + v = v.Elem() + goto loop + case reflect.String: + params.Add(tags[0], v.String()) + case reflect.Int: + params.Add(tags[0], strconv.FormatInt(v.Int(), 10)) + case reflect.Bool: + params.Add(tags[0], strconv.FormatBool(v.Bool())) + case reflect.Slice: + switch v.Type().Elem() { + case reflect.TypeOf(0): + for i := 0; i < v.Len(); i++ { + params.Add(tags[0], strconv.FormatInt(v.Index(i).Int(), 10)) + } + default: + for i := 0; i < v.Len(); i++ { + params.Add(tags[0], v.Index(i).String()) + } + } + case reflect.Map: + if v.Type().Key().Kind() == reflect.String && v.Type().Elem().Kind() == reflect.String { + var s []string + for _, k := range v.MapKeys() { + value := v.MapIndex(k).String() + s = append(s, fmt.Sprintf("'%s':'%s'", k.String(), value)) + } + params.Add(tags[0], fmt.Sprintf("{%s}", strings.Join(s, ", "))) + } + } + } else { + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return &url.URL{}, fmt.Errorf("Required query parameter [%s] not set.", f.Name) + } + } + } + } + + return &url.URL{RawQuery: params.Encode()}, nil + } + // Return an error if the 
underlying type of 'opts' isn't a struct. + return nil, fmt.Errorf("Options type is not a struct.") +} + +/* +BuildHeaders is an internal function to be used by request methods in +individual resource packages. + +It accepts an arbitrary tagged structure and produces a string map that's +suitable for use as the HTTP headers of an outgoing request. Field names are +mapped to header names based in "h" tags. + + type struct Something { + Bar string `h:"x_bar"` + Baz int `h:"lorem_ipsum"` + } + + instance := Something{ + Bar: "AAA", + Baz: "BBB", + } + +will be converted into: + + map[string]string{ + "x_bar": "AAA", + "lorem_ipsum": "BBB", + } + +Untagged fields and fields left at their zero values are skipped. Integers, +booleans and string values are supported. +*/ +func BuildHeaders(opts interface{}) (map[string]string, error) { + optsValue := reflect.ValueOf(opts) + if optsValue.Kind() == reflect.Ptr { + optsValue = optsValue.Elem() + } + + optsType := reflect.TypeOf(opts) + if optsType.Kind() == reflect.Ptr { + optsType = optsType.Elem() + } + + optsMap := make(map[string]string) + if optsValue.Kind() == reflect.Struct { + for i := 0; i < optsValue.NumField(); i++ { + v := optsValue.Field(i) + f := optsType.Field(i) + hTag := f.Tag.Get("h") + + // if the field has a 'h' tag, it goes in the header + if hTag != "" { + tags := strings.Split(hTag, ",") + + // if the field is set, add it to the slice of query pieces + if !isZero(v) { + switch v.Kind() { + case reflect.String: + optsMap[tags[0]] = v.String() + case reflect.Int: + optsMap[tags[0]] = strconv.FormatInt(v.Int(), 10) + case reflect.Bool: + optsMap[tags[0]] = strconv.FormatBool(v.Bool()) + } + } else { + // if the field has a 'required' tag, it can't have a zero-value + if requiredTag := f.Tag.Get("required"); requiredTag == "true" { + return optsMap, fmt.Errorf("Required header [%s] not set.", f.Name) + } + } + } + + } + return optsMap, nil + } + // Return an error if the underlying type of 'opts' isn't a struct. + return optsMap, fmt.Errorf("Options type is not a struct.") +} + +// IDSliceToQueryString takes a slice of elements and converts them into a query +// string. For example, if name=foo and slice=[]int{20, 40, 60}, then the +// result would be `?name=20&name=40&name=60' +func IDSliceToQueryString(name string, ids []int) string { + str := "" + for k, v := range ids { + if k == 0 { + str += "?" + } else { + str += "&" + } + str += fmt.Sprintf("%s=%s", name, strconv.Itoa(v)) + } + return str +} + +// IntWithinRange returns TRUE if an integer falls within a defined range, and +// FALSE if not. +func IntWithinRange(val, min, max int) bool { + return val > min && val < max +} diff --git a/vendor/github.com/gophercloud/gophercloud/provider_client.go b/vendor/github.com/gophercloud/gophercloud/provider_client.go new file mode 100644 index 000000000000..95fa11a78863 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/provider_client.go @@ -0,0 +1,391 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "io" + "io/ioutil" + "net/http" + "strings" + "sync" +) + +// DefaultUserAgent is the default User-Agent string set in the request header. +const DefaultUserAgent = "gophercloud/2.0.0" + +// UserAgent represents a User-Agent header. +type UserAgent struct { + // prepend is the slice of User-Agent strings to prepend to DefaultUserAgent. + // All the strings to prepend are accumulated and prepended in the Join method. 
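	//
	// For example, a caller might prepend its own identifier before issuing
	// requests (an illustrative sketch; the agent string is made up):
	//
	//	var ua UserAgent
	//	ua.Prepend("custom-user-agent/2.4.0")
	//	s := ua.Join() // "custom-user-agent/2.4.0 gophercloud/2.0.0"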
+ prepend []string +} + +// Prepend prepends a user-defined string to the default User-Agent string. Users +// may pass in one or more strings to prepend. +func (ua *UserAgent) Prepend(s ...string) { + ua.prepend = append(s, ua.prepend...) +} + +// Join concatenates all the user-defined User-Agend strings with the default +// Gophercloud User-Agent string. +func (ua *UserAgent) Join() string { + uaSlice := append(ua.prepend, DefaultUserAgent) + return strings.Join(uaSlice, " ") +} + +// ProviderClient stores details that are required to interact with any +// services within a specific provider's API. +// +// Generally, you acquire a ProviderClient by calling the NewClient method in +// the appropriate provider's child package, providing whatever authentication +// credentials are required. +type ProviderClient struct { + // IdentityBase is the base URL used for a particular provider's identity + // service - it will be used when issuing authenticatation requests. It + // should point to the root resource of the identity service, not a specific + // identity version. + IdentityBase string + + // IdentityEndpoint is the identity endpoint. This may be a specific version + // of the identity service. If this is the case, this endpoint is used rather + // than querying versions first. + IdentityEndpoint string + + // TokenID is the ID of the most recently issued valid token. + // NOTE: Aside from within a custom ReauthFunc, this field shouldn't be set by an application. + // To safely read or write this value, call `Token` or `SetToken`, respectively + TokenID string + + // EndpointLocator describes how this provider discovers the endpoints for + // its constituent services. + EndpointLocator EndpointLocator + + // HTTPClient allows users to interject arbitrary http, https, or other transit behaviors. + HTTPClient http.Client + + // UserAgent represents the User-Agent header in the HTTP request. + UserAgent UserAgent + + // ReauthFunc is the function used to re-authenticate the user if the request + // fails with a 401 HTTP response code. This a needed because there may be multiple + // authentication functions for different Identity service versions. + ReauthFunc func() error + + mut *sync.RWMutex + + reauthmut *reauthlock +} + +type reauthlock struct { + sync.RWMutex + reauthing bool +} + +// AuthenticatedHeaders returns a map of HTTP headers that are common for all +// authenticated service requests. +func (client *ProviderClient) AuthenticatedHeaders() (m map[string]string) { + if client.reauthmut != nil { + client.reauthmut.RLock() + if client.reauthmut.reauthing { + client.reauthmut.RUnlock() + return + } + client.reauthmut.RUnlock() + } + t := client.Token() + if t == "" { + return + } + return map[string]string{"X-Auth-Token": t} +} + +// UseTokenLock creates a mutex that is used to allow safe concurrent access to the auth token. +// If the application's ProviderClient is not used concurrently, this doesn't need to be called. +func (client *ProviderClient) UseTokenLock() { + client.mut = new(sync.RWMutex) + client.reauthmut = new(reauthlock) +} + +// Token safely reads the value of the auth token from the ProviderClient. Applications should +// call this method to access the token instead of the TokenID field +func (client *ProviderClient) Token() string { + if client.mut != nil { + client.mut.RLock() + defer client.mut.RUnlock() + } + return client.TokenID +} + +// SetToken safely sets the value of the auth token in the ProviderClient. 
Applications may +// use this method in a custom ReauthFunc +func (client *ProviderClient) SetToken(t string) { + if client.mut != nil { + client.mut.Lock() + defer client.mut.Unlock() + } + client.TokenID = t +} + +//Reauthenticate calls client.ReauthFunc in a thread-safe way. If this is +//called because of a 401 response, the caller may pass the previous token. In +//this case, the reauthentication can be skipped if another thread has already +//reauthenticated in the meantime. If no previous token is known, an empty +//string should be passed instead to force unconditional reauthentication. +func (client *ProviderClient) Reauthenticate(previousToken string) (err error) { + if client.ReauthFunc == nil { + return nil + } + + if client.mut == nil { + return client.ReauthFunc() + } + client.mut.Lock() + defer client.mut.Unlock() + + client.reauthmut.Lock() + client.reauthmut.reauthing = true + client.reauthmut.Unlock() + + if previousToken == "" || client.TokenID == previousToken { + err = client.ReauthFunc() + } + + client.reauthmut.Lock() + client.reauthmut.reauthing = false + client.reauthmut.Unlock() + return +} + +// RequestOpts customizes the behavior of the provider.Request() method. +type RequestOpts struct { + // JSONBody, if provided, will be encoded as JSON and used as the body of the HTTP request. The + // content type of the request will default to "application/json" unless overridden by MoreHeaders. + // It's an error to specify both a JSONBody and a RawBody. + JSONBody interface{} + // RawBody contains an io.Reader that will be consumed by the request directly. No content-type + // will be set unless one is provided explicitly by MoreHeaders. + RawBody io.Reader + // JSONResponse, if provided, will be populated with the contents of the response body parsed as + // JSON. + JSONResponse interface{} + // OkCodes contains a list of numeric HTTP status codes that should be interpreted as success. If + // the response has a different code, an error will be returned. + OkCodes []int + // MoreHeaders specifies additional HTTP headers to be provide on the request. If a header is + // provided with a blank value (""), that header will be *omitted* instead: use this to suppress + // the default Accept header or an inferred Content-Type, for example. + MoreHeaders map[string]string + // ErrorContext specifies the resource error type to return if an error is encountered. + // This lets resources override default error messages based on the response status code. + ErrorContext error +} + +var applicationJSON = "application/json" + +// Request performs an HTTP request using the ProviderClient's current HTTPClient. An authentication +// header will automatically be provided. +func (client *ProviderClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + var body io.Reader + var contentType *string + + // Derive the content body by either encoding an arbitrary object as JSON, or by taking a provided + // io.ReadSeeker as-is. Default the content-type to application/json. + if options.JSONBody != nil { + if options.RawBody != nil { + panic("Please provide only one of JSONBody or RawBody to gophercloud.Request().") + } + + rendered, err := json.Marshal(options.JSONBody) + if err != nil { + return nil, err + } + + body = bytes.NewReader(rendered) + contentType = &applicationJSON + } + + if options.RawBody != nil { + body = options.RawBody + } + + // Construct the http.Request. 
+ req, err := http.NewRequest(method, url, body) + if err != nil { + return nil, err + } + + // Populate the request headers. Apply options.MoreHeaders last, to give the caller the chance to + // modify or omit any header. + if contentType != nil { + req.Header.Set("Content-Type", *contentType) + } + req.Header.Set("Accept", applicationJSON) + + // Set the User-Agent header + req.Header.Set("User-Agent", client.UserAgent.Join()) + + if options.MoreHeaders != nil { + for k, v := range options.MoreHeaders { + if v != "" { + req.Header.Set(k, v) + } else { + req.Header.Del(k) + } + } + } + + // get latest token from client + for k, v := range client.AuthenticatedHeaders() { + req.Header.Set(k, v) + } + + // Set connection parameter to close the connection immediately when we've got the response + req.Close = true + + prereqtok := req.Header.Get("X-Auth-Token") + + // Issue the request. + resp, err := client.HTTPClient.Do(req) + if err != nil { + return nil, err + } + + // Allow default OkCodes if none explicitly set + if options.OkCodes == nil { + options.OkCodes = defaultOkCodes(method) + } + + // Validate the HTTP response status. + var ok bool + for _, code := range options.OkCodes { + if resp.StatusCode == code { + ok = true + break + } + } + + if !ok { + body, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + respErr := ErrUnexpectedResponseCode{ + URL: url, + Method: method, + Expected: options.OkCodes, + Actual: resp.StatusCode, + Body: body, + } + + errType := options.ErrorContext + switch resp.StatusCode { + case http.StatusBadRequest: + err = ErrDefault400{respErr} + if error400er, ok := errType.(Err400er); ok { + err = error400er.Error400(respErr) + } + case http.StatusUnauthorized: + if client.ReauthFunc != nil { + err = client.Reauthenticate(prereqtok) + if err != nil { + e := &ErrUnableToReauthenticate{} + e.ErrOriginal = respErr + return nil, e + } + if options.RawBody != nil { + if seeker, ok := options.RawBody.(io.Seeker); ok { + seeker.Seek(0, 0) + } + } + // make a new call to request with a nil reauth func in order to avoid infinite loop + reauthFunc := client.ReauthFunc + client.ReauthFunc = nil + resp, err = client.Request(method, url, options) + client.ReauthFunc = reauthFunc + if err != nil { + switch err.(type) { + case *ErrUnexpectedResponseCode: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err.(*ErrUnexpectedResponseCode) + return nil, e + default: + e := &ErrErrorAfterReauthentication{} + e.ErrOriginal = err + return nil, e + } + } + return resp, nil + } + err = ErrDefault401{respErr} + if error401er, ok := errType.(Err401er); ok { + err = error401er.Error401(respErr) + } + case http.StatusForbidden: + err = ErrDefault403{respErr} + if error403er, ok := errType.(Err403er); ok { + err = error403er.Error403(respErr) + } + case http.StatusNotFound: + err = ErrDefault404{respErr} + if error404er, ok := errType.(Err404er); ok { + err = error404er.Error404(respErr) + } + case http.StatusMethodNotAllowed: + err = ErrDefault405{respErr} + if error405er, ok := errType.(Err405er); ok { + err = error405er.Error405(respErr) + } + case http.StatusRequestTimeout: + err = ErrDefault408{respErr} + if error408er, ok := errType.(Err408er); ok { + err = error408er.Error408(respErr) + } + case 429: + err = ErrDefault429{respErr} + if error429er, ok := errType.(Err429er); ok { + err = error429er.Error429(respErr) + } + case http.StatusInternalServerError: + err = ErrDefault500{respErr} + if error500er, ok := errType.(Err500er); ok { + err = error500er.Error500(respErr) 
+ } + case http.StatusServiceUnavailable: + err = ErrDefault503{respErr} + if error503er, ok := errType.(Err503er); ok { + err = error503er.Error503(respErr) + } + } + + if err == nil { + err = respErr + } + + return resp, err + } + + // Parse the response body as JSON, if requested to do so. + if options.JSONResponse != nil { + defer resp.Body.Close() + if err := json.NewDecoder(resp.Body).Decode(options.JSONResponse); err != nil { + return nil, err + } + } + + return resp, nil +} + +func defaultOkCodes(method string) []int { + switch { + case method == "GET": + return []int{200} + case method == "POST": + return []int{201, 202} + case method == "PUT": + return []int{201, 202} + case method == "PATCH": + return []int{200, 202, 204} + case method == "DELETE": + return []int{202, 204} + } + + return []int{} +} diff --git a/vendor/github.com/gophercloud/gophercloud/results.go b/vendor/github.com/gophercloud/gophercloud/results.go new file mode 100644 index 000000000000..30d3b15599c1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/results.go @@ -0,0 +1,446 @@ +package gophercloud + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "time" +) + +/* +Result is an internal type to be used by individual resource packages, but its +methods will be available on a wide variety of user-facing embedding types. + +It acts as a base struct that other Result types, returned from request +functions, can embed for convenience. All Results capture basic information +from the HTTP transaction that was performed, including the response body, +HTTP headers, and any errors that happened. + +Generally, each Result type will have an Extract method that can be used to +further interpret the result's payload in a specific context. Extensions or +providers can then provide additional extraction functions to pull out +provider- or extension-specific information as well. +*/ +type Result struct { + // Body is the payload of the HTTP response from the server. In most cases, + // this will be the deserialized JSON structure. + Body interface{} + + // Header contains the HTTP header structure from the original response. + Header http.Header + + // Err is an error that occurred during the operation. It's deferred until + // extraction to make it easier to chain the Extract call. + Err error +} + +// ExtractInto allows users to provide an object into which `Extract` will extract +// the `Result.Body`. This would be useful for OpenStack providers that have +// different fields in the response object than OpenStack proper. 
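//
// A resource package usually wraps ExtractInto in its own Extract helper; a
// minimal sketch (the response structure and "server" key are hypothetical):
//
//	var s struct {
//		Server map[string]interface{} `json:"server"`
//	}
//	err := r.ExtractInto(&s)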
+func (r Result) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + if reader, ok := r.Body.(io.Reader); ok { + if readCloser, ok := reader.(io.Closer); ok { + defer readCloser.Close() + } + return json.NewDecoder(reader).Decode(to) + } + + b, err := json.Marshal(r.Body) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +func (r Result) extractIntoPtr(to interface{}, label string) error { + if label == "" { + return r.ExtractInto(&to) + } + + var m map[string]interface{} + err := r.ExtractInto(&m) + if err != nil { + return err + } + + b, err := json.Marshal(m[label]) + if err != nil { + return err + } + + toValue := reflect.ValueOf(to) + if toValue.Kind() == reflect.Ptr { + toValue = toValue.Elem() + } + + switch toValue.Kind() { + case reflect.Slice: + typeOfV := toValue.Type().Elem() + if typeOfV.Kind() == reflect.Struct { + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + newSlice := reflect.MakeSlice(reflect.SliceOf(typeOfV), 0, 0) + + for _, v := range m[label].([]interface{}) { + // For each iteration of the slice, we create a new struct. + // This is to work around a bug where elements of a slice + // are reused and not overwritten when the same copy of the + // struct is used: + // + // https://github.com/golang/go/issues/21092 + // https://github.com/golang/go/issues/24155 + // https://play.golang.org/p/NHo3ywlPZli + newType := reflect.New(typeOfV).Elem() + + b, err := json.Marshal(v) + if err != nil { + return err + } + + // This is needed for structs with an UnmarshalJSON method. + // Technically this is just unmarshalling the response into + // a struct that is never used, but it's good enough to + // trigger the UnmarshalJSON method. + for i := 0; i < newType.NumField(); i++ { + s := newType.Field(i).Addr().Interface() + + // Unmarshal is used rather than NewDecoder to also work + // around the above-mentioned bug. + err = json.Unmarshal(b, s) + if err != nil { + return err + } + } + + newSlice = reflect.Append(newSlice, newType) + } + + // "to" should now be properly modeled to receive the + // JSON response body and unmarshal into all the correct + // fields of the struct or composed extension struct + // at the end of this method. + toValue.Set(newSlice) + } + } + case reflect.Struct: + typeOfV := toValue.Type() + if typeOfV.NumField() > 0 && typeOfV.Field(0).Anonymous { + for i := 0; i < toValue.NumField(); i++ { + toField := toValue.Field(i) + if toField.Kind() == reflect.Struct { + s := toField.Addr().Interface() + err = json.NewDecoder(bytes.NewReader(b)).Decode(s) + if err != nil { + return err + } + } + } + } + } + + err = json.Unmarshal(b, &to) + return err +} + +// ExtractIntoStructPtr will unmarshal the Result (r) into the provided +// interface{} (to). +// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying struct type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoStructPtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Struct: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to struct, got: %v", t) + } +} + +// ExtractIntoSlicePtr will unmarshal the Result (r) into the provided +// interface{} (to). 
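//
// A typical call from a resource package looks like this (a sketch; the
// Server type and "servers" label are hypothetical):
//
//	var servers []Server
//	err := r.ExtractIntoSlicePtr(&servers, "servers")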
+// +// NOTE: For internal use only +// +// `to` must be a pointer to an underlying slice type +// +// If provided, `label` will be filtered out of the response +// body prior to `r` being unmarshalled into `to`. +func (r Result) ExtractIntoSlicePtr(to interface{}, label string) error { + if r.Err != nil { + return r.Err + } + + t := reflect.TypeOf(to) + if k := t.Kind(); k != reflect.Ptr { + return fmt.Errorf("Expected pointer, got %v", k) + } + switch t.Elem().Kind() { + case reflect.Slice: + return r.extractIntoPtr(to, label) + default: + return fmt.Errorf("Expected pointer to slice, got: %v", t) + } +} + +// PrettyPrintJSON creates a string containing the full response body as +// pretty-printed JSON. It's useful for capturing test fixtures and for +// debugging extraction bugs. If you include its output in an issue related to +// a buggy extraction function, we will all love you forever. +func (r Result) PrettyPrintJSON() string { + pretty, err := json.MarshalIndent(r.Body, "", " ") + if err != nil { + panic(err.Error()) + } + return string(pretty) +} + +// ErrResult is an internal type to be used by individual resource packages, but +// its methods will be available on a wide variety of user-facing embedding +// types. +// +// It represents results that only contain a potential error and +// nothing else. Usually, if the operation executed successfully, the Err field +// will be nil; otherwise it will be stocked with a relevant error. Use the +// ExtractErr method +// to cleanly pull it out. +type ErrResult struct { + Result +} + +// ExtractErr is a function that extracts error information, or nil, from a result. +func (r ErrResult) ExtractErr() error { + return r.Err +} + +/* +HeaderResult is an internal type to be used by individual resource packages, but +its methods will be available on a wide variety of user-facing embedding types. + +It represents a result that only contains an error (possibly nil) and an +http.Header. This is used, for example, by the objectstorage packages in +openstack, because most of the operations don't return response bodies, but do +have relevant information in headers. +*/ +type HeaderResult struct { + Result +} + +// ExtractInto allows users to provide an object into which `Extract` will +// extract the http.Header headers of the result. +func (r HeaderResult) ExtractInto(to interface{}) error { + if r.Err != nil { + return r.Err + } + + tmpHeaderMap := map[string]string{} + for k, v := range r.Header { + if len(v) > 0 { + tmpHeaderMap[k] = v[0] + } + } + + b, err := json.Marshal(tmpHeaderMap) + if err != nil { + return err + } + err = json.Unmarshal(b, to) + + return err +} + +// RFC3339Milli describes a common time format used by some API responses. 
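//
// For example, a timestamp such as "2018-01-04T10:00:12.123456Z" (an
// illustrative value) can be parsed with this layout:
//
//	t, err := time.Parse(RFC3339Milli, "2018-01-04T10:00:12.123456Z")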
+const RFC3339Milli = "2006-01-02T15:04:05.999999Z" + +type JSONRFC3339Milli time.Time + +func (jt *JSONRFC3339Milli) UnmarshalJSON(data []byte) error { + b := bytes.NewBuffer(data) + dec := json.NewDecoder(b) + var s string + if err := dec.Decode(&s); err != nil { + return err + } + t, err := time.Parse(RFC3339Milli, s) + if err != nil { + return err + } + *jt = JSONRFC3339Milli(t) + return nil +} + +const RFC3339MilliNoZ = "2006-01-02T15:04:05.999999" + +type JSONRFC3339MilliNoZ time.Time + +func (jt *JSONRFC3339MilliNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339MilliNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339MilliNoZ(t) + return nil +} + +type JSONRFC1123 time.Time + +func (jt *JSONRFC1123) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(time.RFC1123, s) + if err != nil { + return err + } + *jt = JSONRFC1123(t) + return nil +} + +type JSONUnix time.Time + +func (jt *JSONUnix) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + unix, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return err + } + t = time.Unix(unix, 0) + *jt = JSONUnix(t) + return nil +} + +// RFC3339NoZ is the time format used in Heat (Orchestration). +const RFC3339NoZ = "2006-01-02T15:04:05" + +type JSONRFC3339NoZ time.Time + +func (jt *JSONRFC3339NoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339NoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339NoZ(t) + return nil +} + +// RFC3339ZNoT is the time format used in Zun (Containers Service). +const RFC3339ZNoT = "2006-01-02 15:04:05-07:00" + +type JSONRFC3339ZNoT time.Time + +func (jt *JSONRFC3339ZNoT) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoT, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoT(t) + return nil +} + +// RFC3339ZNoTNoZ is another time format used in Zun (Containers Service). +const RFC3339ZNoTNoZ = "2006-01-02 15:04:05" + +type JSONRFC3339ZNoTNoZ time.Time + +func (jt *JSONRFC3339ZNoTNoZ) UnmarshalJSON(data []byte) error { + var s string + if err := json.Unmarshal(data, &s); err != nil { + return err + } + if s == "" { + return nil + } + t, err := time.Parse(RFC3339ZNoTNoZ, s) + if err != nil { + return err + } + *jt = JSONRFC3339ZNoTNoZ(t) + return nil +} + +/* +Link is an internal type to be used in packages of collection resources that are +paginated in a certain way. + +It's a response substructure common to many paginated collection results that is +used to point to related pages. Usually, the one we care about is the one with +Rel field set to "next". +*/ +type Link struct { + Href string `json:"href"` + Rel string `json:"rel"` +} + +/* +ExtractNextURL is an internal function useful for packages of collection +resources that are paginated in a certain way. + +It attempts to extract the "next" URL from slice of Link structs, or +"" if no such URL is present. 
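For example (the URLs below are illustrative):

	links := []Link{
		{Href: "https://example.com/v2/servers?marker=abc", Rel: "next"},
		{Href: "https://example.com/v2/servers", Rel: "self"},
	}
	next, _ := ExtractNextURL(links) // "https://example.com/v2/servers?marker=abc"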
+*/ +func ExtractNextURL(links []Link) (string, error) { + var url string + + for _, l := range links { + if l.Rel == "next" { + url = l.Href + } + } + + if url == "" { + return "", nil + } + + return url, nil +} diff --git a/vendor/github.com/gophercloud/gophercloud/script/acceptancetest b/vendor/github.com/gophercloud/gophercloud/script/acceptancetest new file mode 100755 index 000000000000..0541c8f4977c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/acceptancetest @@ -0,0 +1,60 @@ +#!/bin/bash +# +set -x + +source `dirname $0`/stackenv + +timeout="60m" +failed= + +# Run the acceptance tests. +# Check the error code after each suite, but do not exit early if a suite failed. + +# Identity v3 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/identity/v3/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# Networking v2 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/networking/v2/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# Block Storage v2 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/blockstorage/v2/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# Block Storage v3 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/blockstorage/v3/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# Compute v2 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/compute/v2/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# Container v1 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/container/v1/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# Image Service v2 +go test -v -timeout $timeout -tags "fixtures acceptance" ./acceptance/openstack/imageservice/v2/ +if [[ $? != 0 ]]; then + failed=1 +fi + +# If any of the test suites failed, exit 1 +if [[ -n $failed ]]; then + exit 1 +fi + +exit 0 diff --git a/vendor/github.com/gophercloud/gophercloud/script/bootstrap b/vendor/github.com/gophercloud/gophercloud/script/bootstrap new file mode 100755 index 000000000000..78a195dcf79d --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/bootstrap @@ -0,0 +1,25 @@ +#!/bin/bash +# +# This script helps new contributors set up their local workstation for +# gophercloud development and contributions. + +# Create the environment +export GOPATH=$HOME/go/gophercloud +mkdir -p $GOPATH + +# Download gophercloud into that environment +go get github.com/gophercloud/gophercloud +cd $GOPATH/src/github.com/gophercloud/gophercloud +git checkout master + +# Write out the env.sh convenience file. +cd $GOPATH +cat <env.sh +#!/bin/bash +export GOPATH=$(pwd) +export GOPHERCLOUD=$GOPATH/src/github.com/gophercloud/gophercloud +EOF +chmod a+x env.sh + +# Make changes immediately available as a convenience. +. ./env.sh diff --git a/vendor/github.com/gophercloud/gophercloud/script/cibuild b/vendor/github.com/gophercloud/gophercloud/script/cibuild new file mode 100755 index 000000000000..1cb389e7dce0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/cibuild @@ -0,0 +1,5 @@ +#!/bin/bash +# +# Test script to be invoked by Travis. 
+ +exec script/unittest -v diff --git a/vendor/github.com/gophercloud/gophercloud/script/coverage b/vendor/github.com/gophercloud/gophercloud/script/coverage new file mode 100755 index 000000000000..3efa81ba5aea --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/coverage @@ -0,0 +1,12 @@ +#!/bin/bash + +set -e + +n=1 +for testpkg in $(go list ./testing ./.../testing); do + covpkg="${testpkg/"/testing"/}" + go test -covermode count -coverprofile "testing_"$n.coverprofile -coverpkg $covpkg $testpkg 2>/dev/null + n=$((n+1)) +done +gocovmerge `ls *.coverprofile` > cover.out +rm *.coverprofile diff --git a/vendor/github.com/gophercloud/gophercloud/script/format b/vendor/github.com/gophercloud/gophercloud/script/format new file mode 100755 index 000000000000..05645a825239 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/format @@ -0,0 +1,41 @@ +#!/usr/bin/env bash + +goimports="goimports" + +find_files() { + find . -not \( \ + \( \ + -wholename './output' \ + -o -wholename './_output' \ + -o -wholename './_gopath' \ + -o -wholename './release' \ + -o -wholename './target' \ + -o -wholename '*/third_party/*' \ + -o -wholename '*/vendor/*' \ + \) -prune \ + \) -name '*.go' +} + +ignore_files=( + "./openstack/compute/v2/extensions/quotasets/testing/fixtures.go" + "./openstack/networking/v2/extensions/vpnaas/ikepolicies/testing/requests_test.go" +) + +bad_files=$(find_files | xargs ${goimports} -l) + +final_files=() +for bad_file in $bad_files; do + found= + for ignore_file in "${ignore_files[@]}"; do + [[ "${bad_file}" == "${ignore_file}" ]] && { found=1; break; } + done + [[ -n $found ]] || final_files+=("$bad_file") +done + +if [[ "${#final_files[@]}" -gt 0 ]]; then + diff=$(echo "${final_files[@]}" | xargs ${goimports} -d -e 2>&1) + if [[ -n "${diff}" ]]; then + echo "${diff}" + exit 1 + fi +fi diff --git a/vendor/github.com/gophercloud/gophercloud/script/stackenv b/vendor/github.com/gophercloud/gophercloud/script/stackenv new file mode 100644 index 000000000000..6a742a32f127 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/stackenv @@ -0,0 +1,25 @@ +# Prep the testing environment by creating the required testing resources and +# environment variables. 
This env is for theopenlab CI jobs, you might need +# to modify this according to your setup + +pushd /opt/stack/new/devstack +source openrc admin admin +openstack flavor create m1.acctest --id 99 --ram 512 --disk 5 --vcpu 1 --ephemeral 10 +openstack flavor create m1.resize --id 98 --ram 512 --disk 6 --vcpu 1 --ephemeral 10 +_NETWORK_ID=$(openstack network show private -c id -f value) +_EXTGW_ID=$(openstack network show public -c id -f value) +_IMAGE=$(openstack image list | grep -i cirros | head -n 1) +_IMAGE_ID=$(echo $_IMAGE | awk -F\| '{print $2}' | tr -d ' ') +_IMAGE_NAME=$(echo $_IMAGE | awk -F\| '{print $3}' | tr -d ' ') +echo export OS_IMAGE_NAME="$_IMAGE_NAME" >> openrc +echo export OS_IMAGE_ID="$_IMAGE_ID" >> openrc +echo export OS_NETWORK_ID=$_NETWORK_ID >> openrc +echo export OS_EXTGW_ID=$_EXTGW_ID >> openrc +echo export OS_POOL_NAME="public" >> openrc +echo export OS_FLAVOR_ID=99 >> openrc +echo export OS_FLAVOR_ID_RESIZE=98 >> openrc +echo export OS_SHARE_NETWORK_ID=foobar >> openrc +echo export OS_DOMAIN_ID=default >> openrc +source openrc admin admin +popd + diff --git a/vendor/github.com/gophercloud/gophercloud/script/test b/vendor/github.com/gophercloud/gophercloud/script/test new file mode 100755 index 000000000000..1e03dff8ab34 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/test @@ -0,0 +1,5 @@ +#!/bin/bash +# +# Run all the tests. + +exec go test -tags 'acceptance fixtures' ./... $@ diff --git a/vendor/github.com/gophercloud/gophercloud/script/unittest b/vendor/github.com/gophercloud/gophercloud/script/unittest new file mode 100755 index 000000000000..2c65d06034a4 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/script/unittest @@ -0,0 +1,5 @@ +#!/bin/bash +# +# Run the unit tests. + +exec go test ./testing ./.../testing $@ diff --git a/vendor/github.com/gophercloud/gophercloud/service_client.go b/vendor/github.com/gophercloud/gophercloud/service_client.go new file mode 100644 index 000000000000..2734510e1b45 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/service_client.go @@ -0,0 +1,150 @@ +package gophercloud + +import ( + "io" + "net/http" + "strings" +) + +// ServiceClient stores details required to interact with a specific service API implemented by a provider. +// Generally, you'll acquire these by calling the appropriate `New` method on a ProviderClient. +type ServiceClient struct { + // ProviderClient is a reference to the provider that implements this service. + *ProviderClient + + // Endpoint is the base URL of the service's API, acquired from a service catalog. + // It MUST end with a /. + Endpoint string + + // ResourceBase is the base URL shared by the resources within a service's API. It should include + // the API version and, like Endpoint, MUST end with a / if set. If not set, the Endpoint is used + // as-is, instead. + ResourceBase string + + // This is the service client type (e.g. compute, sharev2). + // NOTE: FOR INTERNAL USE ONLY. DO NOT SET. GOPHERCLOUD WILL SET THIS. + // It is only exported because it gets set in a different package. + Type string + + // The microversion of the service to use. Set this to use a particular microversion. + Microversion string + + // MoreHeaders allows users (or Gophercloud) to set service-wide headers on requests. Put another way, + // values set in this field will be set on all the HTTP requests the service client sends. + MoreHeaders map[string]string +} + +// ResourceBaseURL returns the base URL of any resources used by this service. It MUST end with a /. 
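//
// For example, with Endpoint set to "https://compute.example.com/v2/" (a
// hypothetical catalog entry) and no ResourceBase, ServiceURL composes
// resource URLs on top of it:
//
//	client.ServiceURL("servers", "detail")
//	// "https://compute.example.com/v2/servers/detail"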
+func (client *ServiceClient) ResourceBaseURL() string { + if client.ResourceBase != "" { + return client.ResourceBase + } + return client.Endpoint +} + +// ServiceURL constructs a URL for a resource belonging to this provider. +func (client *ServiceClient) ServiceURL(parts ...string) string { + return client.ResourceBaseURL() + strings.Join(parts, "/") +} + +func (client *ServiceClient) initReqOpts(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) { + if v, ok := (JSONBody).(io.Reader); ok { + opts.RawBody = v + } else if JSONBody != nil { + opts.JSONBody = JSONBody + } + + if JSONResponse != nil { + opts.JSONResponse = JSONResponse + } + + if opts.MoreHeaders == nil { + opts.MoreHeaders = make(map[string]string) + } + + if client.Microversion != "" { + client.setMicroversionHeader(opts) + } +} + +// Get calls `Request` with the "GET" HTTP verb. +func (client *ServiceClient) Get(url string, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, JSONResponse, opts) + return client.Request("GET", url, opts) +} + +// Post calls `Request` with the "POST" HTTP verb. +func (client *ServiceClient) Post(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("POST", url, opts) +} + +// Put calls `Request` with the "PUT" HTTP verb. +func (client *ServiceClient) Put(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PUT", url, opts) +} + +// Patch calls `Request` with the "PATCH" HTTP verb. +func (client *ServiceClient) Patch(url string, JSONBody interface{}, JSONResponse interface{}, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, JSONBody, JSONResponse, opts) + return client.Request("PATCH", url, opts) +} + +// Delete calls `Request` with the "DELETE" HTTP verb. +func (client *ServiceClient) Delete(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("DELETE", url, opts) +} + +// Head calls `Request` with the "HEAD" HTTP verb. 
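//
// For example, object storage metadata is commonly read from the response
// headers of a HEAD request (a sketch; the URL parts are hypothetical):
//
//	resp, err := client.Head(client.ServiceURL("containers", name), nil)
//	// on success, the metadata of interest is carried in resp.Header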
+func (client *ServiceClient) Head(url string, opts *RequestOpts) (*http.Response, error) { + if opts == nil { + opts = new(RequestOpts) + } + client.initReqOpts(url, nil, nil, opts) + return client.Request("HEAD", url, opts) +} + +func (client *ServiceClient) setMicroversionHeader(opts *RequestOpts) { + switch client.Type { + case "compute": + opts.MoreHeaders["X-OpenStack-Nova-API-Version"] = client.Microversion + case "sharev2": + opts.MoreHeaders["X-OpenStack-Manila-API-Version"] = client.Microversion + case "volume": + opts.MoreHeaders["X-OpenStack-Volume-API-Version"] = client.Microversion + } + + if client.Type != "" { + opts.MoreHeaders["OpenStack-API-Version"] = client.Type + " " + client.Microversion + } +} + +// Request carries out the HTTP operation for the service client +func (client *ServiceClient) Request(method, url string, options *RequestOpts) (*http.Response, error) { + if len(client.MoreHeaders) > 0 { + if options == nil { + options = new(RequestOpts) + } + for k, v := range client.MoreHeaders { + options.MoreHeaders[k] = v + } + } + return client.ProviderClient.Request(method, url, options) +} diff --git a/vendor/github.com/gophercloud/gophercloud/testhelper/client/fake.go b/vendor/github.com/gophercloud/gophercloud/testhelper/client/fake.go new file mode 100644 index 000000000000..3d81cc97b9cc --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testhelper/client/fake.go @@ -0,0 +1,17 @@ +package client + +import ( + "github.com/gophercloud/gophercloud" + "github.com/gophercloud/gophercloud/testhelper" +) + +// Fake token to use. +const TokenID = "cbc36478b0bd8e67e89469c7749d4127" + +// ServiceClient returns a generic service client for use in tests. +func ServiceClient() *gophercloud.ServiceClient { + return &gophercloud.ServiceClient{ + ProviderClient: &gophercloud.ProviderClient{TokenID: TokenID}, + Endpoint: testhelper.Endpoint(), + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/testhelper/convenience.go b/vendor/github.com/gophercloud/gophercloud/testhelper/convenience.go new file mode 100644 index 000000000000..25f6720e82cf --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testhelper/convenience.go @@ -0,0 +1,348 @@ +package testhelper + +import ( + "bytes" + "encoding/json" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" +) + +const ( + logBodyFmt = "\033[1;31m%s %s\033[0m" + greenCode = "\033[0m\033[1;32m" + yellowCode = "\033[0m\033[1;33m" + resetCode = "\033[0m\033[1;31m" +) + +func prefix(depth int) string { + _, file, line, _ := runtime.Caller(depth) + return fmt.Sprintf("Failure in %s, line %d:", filepath.Base(file), line) +} + +func green(str interface{}) string { + return fmt.Sprintf("%s%#v%s", greenCode, str, resetCode) +} + +func yellow(str interface{}) string { + return fmt.Sprintf("%s%#v%s", yellowCode, str, resetCode) +} + +func logFatal(t *testing.T, str string) { + t.Fatalf(logBodyFmt, prefix(3), str) +} + +func logError(t *testing.T, str string) { + t.Errorf(logBodyFmt, prefix(3), str) +} + +type diffLogger func([]string, interface{}, interface{}) + +type visit struct { + a1 uintptr + a2 uintptr + typ reflect.Type +} + +// Recursively visits the structures of "expected" and "actual". The diffLogger function will be +// invoked with each different value encountered, including the reference path that was followed +// to get there. 
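//
// For instance, the package's own deepDiff wrapper drives it roughly like
// this (an illustrative sketch):
//
//	deepDiff(expected, actual, func(path []string, e, a interface{}) {
//		t.Errorf("difference at %s: expected %v, got %v", strings.Join(path, ""), e, a)
//	})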
+func deepDiffEqual(expected, actual reflect.Value, visited map[visit]bool, path []string, logDifference diffLogger) { + defer func() { + // Fall back to the regular reflect.DeepEquals function. + if r := recover(); r != nil { + var e, a interface{} + if expected.IsValid() { + e = expected.Interface() + } + if actual.IsValid() { + a = actual.Interface() + } + + if !reflect.DeepEqual(e, a) { + logDifference(path, e, a) + } + } + }() + + if !expected.IsValid() && actual.IsValid() { + logDifference(path, nil, actual.Interface()) + return + } + if expected.IsValid() && !actual.IsValid() { + logDifference(path, expected.Interface(), nil) + return + } + if !expected.IsValid() && !actual.IsValid() { + return + } + + hard := func(k reflect.Kind) bool { + switch k { + case reflect.Array, reflect.Map, reflect.Slice, reflect.Struct: + return true + } + return false + } + + if expected.CanAddr() && actual.CanAddr() && hard(expected.Kind()) { + addr1 := expected.UnsafeAddr() + addr2 := actual.UnsafeAddr() + + if addr1 > addr2 { + addr1, addr2 = addr2, addr1 + } + + if addr1 == addr2 { + // References are identical. We can short-circuit + return + } + + typ := expected.Type() + v := visit{addr1, addr2, typ} + if visited[v] { + // Already visited. + return + } + + // Remember this visit for later. + visited[v] = true + } + + switch expected.Kind() { + case reflect.Array: + for i := 0; i < expected.Len(); i++ { + hop := append(path, fmt.Sprintf("[%d]", i)) + deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference) + } + return + case reflect.Slice: + if expected.IsNil() != actual.IsNil() { + logDifference(path, expected.Interface(), actual.Interface()) + return + } + if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() { + return + } + for i := 0; i < expected.Len(); i++ { + hop := append(path, fmt.Sprintf("[%d]", i)) + deepDiffEqual(expected.Index(i), actual.Index(i), visited, hop, logDifference) + } + return + case reflect.Interface: + if expected.IsNil() != actual.IsNil() { + logDifference(path, expected.Interface(), actual.Interface()) + return + } + deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference) + return + case reflect.Ptr: + deepDiffEqual(expected.Elem(), actual.Elem(), visited, path, logDifference) + return + case reflect.Struct: + for i, n := 0, expected.NumField(); i < n; i++ { + field := expected.Type().Field(i) + hop := append(path, "."+field.Name) + deepDiffEqual(expected.Field(i), actual.Field(i), visited, hop, logDifference) + } + return + case reflect.Map: + if expected.IsNil() != actual.IsNil() { + logDifference(path, expected.Interface(), actual.Interface()) + return + } + if expected.Len() == actual.Len() && expected.Pointer() == actual.Pointer() { + return + } + + var keys []reflect.Value + if expected.Len() >= actual.Len() { + keys = expected.MapKeys() + } else { + keys = actual.MapKeys() + } + + for _, k := range keys { + expectedValue := expected.MapIndex(k) + actualValue := actual.MapIndex(k) + + if !expectedValue.IsValid() { + logDifference(path, nil, actual.Interface()) + return + } + if !actualValue.IsValid() { + logDifference(path, expected.Interface(), nil) + return + } + + hop := append(path, fmt.Sprintf("[%v]", k)) + deepDiffEqual(expectedValue, actualValue, visited, hop, logDifference) + } + return + case reflect.Func: + if expected.IsNil() != actual.IsNil() { + logDifference(path, expected.Interface(), actual.Interface()) + } + return + default: + if expected.Interface() != actual.Interface() { + 
logDifference(path, expected.Interface(), actual.Interface()) + } + } +} + +func deepDiff(expected, actual interface{}, logDifference diffLogger) { + if expected == nil || actual == nil { + logDifference([]string{}, expected, actual) + return + } + + expectedValue := reflect.ValueOf(expected) + actualValue := reflect.ValueOf(actual) + + if expectedValue.Type() != actualValue.Type() { + logDifference([]string{}, expected, actual) + return + } + deepDiffEqual(expectedValue, actualValue, map[visit]bool{}, []string{}, logDifference) +} + +// AssertEquals compares two arbitrary values and performs a comparison. If the +// comparison fails, a fatal error is raised that will fail the test +func AssertEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + logFatal(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual))) + } +} + +// CheckEquals is similar to AssertEquals, except with a non-fatal error +func CheckEquals(t *testing.T, expected, actual interface{}) { + if expected != actual { + logError(t, fmt.Sprintf("expected %s but got %s", green(expected), yellow(actual))) + } +} + +// AssertDeepEquals - like Equals - performs a comparison - but on more complex +// structures that requires deeper inspection +func AssertDeepEquals(t *testing.T, expected, actual interface{}) { + pre := prefix(2) + + differed := false + deepDiff(expected, actual, func(path []string, expected, actual interface{}) { + differed = true + t.Errorf("\033[1;31m%sat %s expected %s, but got %s\033[0m", + pre, + strings.Join(path, ""), + green(expected), + yellow(actual)) + }) + if differed { + logFatal(t, "The structures were different.") + } +} + +// CheckDeepEquals is similar to AssertDeepEquals, except with a non-fatal error +func CheckDeepEquals(t *testing.T, expected, actual interface{}) { + pre := prefix(2) + + deepDiff(expected, actual, func(path []string, expected, actual interface{}) { + t.Errorf("\033[1;31m%s at %s expected %s, but got %s\033[0m", + pre, + strings.Join(path, ""), + green(expected), + yellow(actual)) + }) +} + +func isByteArrayEquals(t *testing.T, expectedBytes []byte, actualBytes []byte) bool { + return bytes.Equal(expectedBytes, actualBytes) +} + +// AssertByteArrayEquals a convenience function for checking whether two byte arrays are equal +func AssertByteArrayEquals(t *testing.T, expectedBytes []byte, actualBytes []byte) { + if !isByteArrayEquals(t, expectedBytes, actualBytes) { + logFatal(t, "The bytes differed.") + } +} + +// CheckByteArrayEquals a convenience function for silent checking whether two byte arrays are equal +func CheckByteArrayEquals(t *testing.T, expectedBytes []byte, actualBytes []byte) { + if !isByteArrayEquals(t, expectedBytes, actualBytes) { + logError(t, "The bytes differed.") + } +} + +// isJSONEquals is a utility function that implements JSON comparison for AssertJSONEquals and +// CheckJSONEquals. 
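//
// In a test, the assertion form is typically used like this (the expected
// JSON and value below are illustrative):
//
//	AssertJSONEquals(t, `{"name": "web", "count": 2}`, map[string]interface{}{
//		"name":  "web",
//		"count": 2,
//	})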
+func isJSONEquals(t *testing.T, expectedJSON string, actual interface{}) bool { + var parsedExpected, parsedActual interface{} + err := json.Unmarshal([]byte(expectedJSON), &parsedExpected) + if err != nil { + t.Errorf("Unable to parse expected value as JSON: %v", err) + return false + } + + jsonActual, err := json.Marshal(actual) + AssertNoErr(t, err) + err = json.Unmarshal(jsonActual, &parsedActual) + AssertNoErr(t, err) + + if !reflect.DeepEqual(parsedExpected, parsedActual) { + prettyExpected, err := json.MarshalIndent(parsedExpected, "", " ") + if err != nil { + t.Logf("Unable to pretty-print expected JSON: %v\n%s", err, expectedJSON) + } else { + // We can't use green() here because %#v prints prettyExpected as a byte array literal, which + // is... unhelpful. Converting it to a string first leaves "\n" uninterpreted for some reason. + t.Logf("Expected JSON:\n%s%s%s", greenCode, prettyExpected, resetCode) + } + + prettyActual, err := json.MarshalIndent(actual, "", " ") + if err != nil { + t.Logf("Unable to pretty-print actual JSON: %v\n%#v", err, actual) + } else { + // We can't use yellow() for the same reason. + t.Logf("Actual JSON:\n%s%s%s", yellowCode, prettyActual, resetCode) + } + + return false + } + return true +} + +// AssertJSONEquals serializes a value as JSON, parses an expected string as JSON, and ensures that +// both are consistent. If they aren't, the expected and actual structures are pretty-printed and +// shown for comparison. +// +// This is useful for comparing structures that are built as nested map[string]interface{} values, +// which are a pain to construct as literals. +func AssertJSONEquals(t *testing.T, expectedJSON string, actual interface{}) { + if !isJSONEquals(t, expectedJSON, actual) { + logFatal(t, "The generated JSON structure differed.") + } +} + +// CheckJSONEquals is similar to AssertJSONEquals, but nonfatal. +func CheckJSONEquals(t *testing.T, expectedJSON string, actual interface{}) { + if !isJSONEquals(t, expectedJSON, actual) { + logError(t, "The generated JSON structure differed.") + } +} + +// AssertNoErr is a convenience function for checking whether an error value is +// an actual error +func AssertNoErr(t *testing.T, e error) { + if e != nil { + logFatal(t, fmt.Sprintf("unexpected error %s", yellow(e.Error()))) + } +} + +// CheckNoErr is similar to AssertNoErr, except with a non-fatal error +func CheckNoErr(t *testing.T, e error) { + if e != nil { + logError(t, fmt.Sprintf("unexpected error %s", yellow(e.Error()))) + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/testhelper/doc.go b/vendor/github.com/gophercloud/gophercloud/testhelper/doc.go new file mode 100644 index 000000000000..25b4dfebbbe3 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testhelper/doc.go @@ -0,0 +1,4 @@ +/* +Package testhelper container methods that are useful for writing unit tests. 
+*/ +package testhelper diff --git a/vendor/github.com/gophercloud/gophercloud/testhelper/fixture/helper.go b/vendor/github.com/gophercloud/gophercloud/testhelper/fixture/helper.go new file mode 100644 index 000000000000..fe98c86f99e0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testhelper/fixture/helper.go @@ -0,0 +1,31 @@ +package fixture + +import ( + "fmt" + "net/http" + "testing" + + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func SetupHandler(t *testing.T, url, method, requestBody, responseBody string, status int) { + th.Mux.HandleFunc(url, func(w http.ResponseWriter, r *http.Request) { + th.TestMethod(t, r, method) + th.TestHeader(t, r, "X-Auth-Token", client.TokenID) + + if requestBody != "" { + th.TestJSONRequest(t, r, requestBody) + } + + if responseBody != "" { + w.Header().Add("Content-Type", "application/json") + } + + w.WriteHeader(status) + + if responseBody != "" { + fmt.Fprintf(w, responseBody) + } + }) +} diff --git a/vendor/github.com/gophercloud/gophercloud/testhelper/http_responses.go b/vendor/github.com/gophercloud/gophercloud/testhelper/http_responses.go new file mode 100644 index 000000000000..e1f1f9ac0e89 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testhelper/http_responses.go @@ -0,0 +1,91 @@ +package testhelper + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "reflect" + "testing" +) + +var ( + // Mux is a multiplexer that can be used to register handlers. + Mux *http.ServeMux + + // Server is an in-memory HTTP server for testing. + Server *httptest.Server +) + +// SetupHTTP prepares the Mux and Server. +func SetupHTTP() { + Mux = http.NewServeMux() + Server = httptest.NewServer(Mux) +} + +// TeardownHTTP releases HTTP-related resources. +func TeardownHTTP() { + Server.Close() +} + +// Endpoint returns a fake endpoint that will actually target the Mux. +func Endpoint() string { + return Server.URL + "/" +} + +// TestFormValues ensures that all the URL parameters given to the http.Request are the same as values. +func TestFormValues(t *testing.T, r *http.Request, values map[string]string) { + want := url.Values{} + for k, v := range values { + want.Add(k, v) + } + + r.ParseForm() + if !reflect.DeepEqual(want, r.Form) { + t.Errorf("Request parameters = %v, want %v", r.Form, want) + } +} + +// TestMethod checks that the Request has the expected method (e.g. GET, POST). +func TestMethod(t *testing.T, r *http.Request, expected string) { + if expected != r.Method { + t.Errorf("Request method = %v, expected %v", r.Method, expected) + } +} + +// TestHeader checks that the header on the http.Request matches the expected value. +func TestHeader(t *testing.T, r *http.Request, header string, expected string) { + if actual := r.Header.Get(header); expected != actual { + t.Errorf("Header %s = %s, expected %s", header, actual, expected) + } +} + +// TestBody verifies that the request body matches an expected body. +func TestBody(t *testing.T, r *http.Request, expected string) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Unable to read body: %v", err) + } + str := string(b) + if expected != str { + t.Errorf("Body = %s, expected %s", str, expected) + } +} + +// TestJSONRequest verifies that the JSON payload of a request matches an expected structure, without asserting things about +// whitespace or ordering. 
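//
// A handler registered on Mux in a test can use it to validate the request it
// receives (a sketch; the route and payload are illustrative):
//
//	Mux.HandleFunc("/servers", func(w http.ResponseWriter, r *http.Request) {
//		TestMethod(t, r, "POST")
//		TestJSONRequest(t, r, `{"server": {"name": "web"}}`)
//		w.WriteHeader(http.StatusAccepted)
//	})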
+func TestJSONRequest(t *testing.T, r *http.Request, expected string) { + b, err := ioutil.ReadAll(r.Body) + if err != nil { + t.Errorf("Unable to read request body: %v", err) + } + + var actualJSON interface{} + err = json.Unmarshal(b, &actualJSON) + if err != nil { + t.Errorf("Unable to parse request body as JSON: %v", err) + } + + CheckJSONEquals(t, expected, actualJSON) +} diff --git a/vendor/github.com/gophercloud/gophercloud/testing/doc.go b/vendor/github.com/gophercloud/gophercloud/testing/doc.go new file mode 100644 index 000000000000..244a62e32389 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/doc.go @@ -0,0 +1,2 @@ +// gophercloud +package testing diff --git a/vendor/github.com/gophercloud/gophercloud/testing/endpoint_search_test.go b/vendor/github.com/gophercloud/gophercloud/testing/endpoint_search_test.go new file mode 100644 index 000000000000..22476cbb146f --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/endpoint_search_test.go @@ -0,0 +1,20 @@ +package testing + +import ( + "testing" + + "github.com/gophercloud/gophercloud" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestApplyDefaultsToEndpointOpts(t *testing.T) { + eo := gophercloud.EndpointOpts{Availability: gophercloud.AvailabilityPublic} + eo.ApplyDefaults("compute") + expected := gophercloud.EndpointOpts{Availability: gophercloud.AvailabilityPublic, Type: "compute"} + th.CheckDeepEquals(t, expected, eo) + + eo = gophercloud.EndpointOpts{Type: "compute"} + eo.ApplyDefaults("object-store") + expected = gophercloud.EndpointOpts{Availability: gophercloud.AvailabilityPublic, Type: "compute"} + th.CheckDeepEquals(t, expected, eo) +} diff --git a/vendor/github.com/gophercloud/gophercloud/testing/params_test.go b/vendor/github.com/gophercloud/gophercloud/testing/params_test.go new file mode 100644 index 000000000000..602536649543 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/params_test.go @@ -0,0 +1,276 @@ +package testing + +import ( + "net/url" + "reflect" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestMaybeString(t *testing.T) { + testString := "" + var expected *string + actual := gophercloud.MaybeString(testString) + th.CheckDeepEquals(t, expected, actual) + + testString = "carol" + expected = &testString + actual = gophercloud.MaybeString(testString) + th.CheckDeepEquals(t, expected, actual) +} + +func TestMaybeInt(t *testing.T) { + testInt := 0 + var expected *int + actual := gophercloud.MaybeInt(testInt) + th.CheckDeepEquals(t, expected, actual) + + testInt = 4 + expected = &testInt + actual = gophercloud.MaybeInt(testInt) + th.CheckDeepEquals(t, expected, actual) +} + +func TestBuildQueryString(t *testing.T) { + type testVar string + iFalse := false + opts := struct { + J int `q:"j"` + R string `q:"r" required:"true"` + C bool `q:"c"` + S []string `q:"s"` + TS []testVar `q:"ts"` + TI []int `q:"ti"` + F *bool `q:"f"` + M map[string]string `q:"m"` + }{ + J: 2, + R: "red", + C: true, + S: []string{"one", "two", "three"}, + TS: []testVar{"a", "b"}, + TI: []int{1, 2}, + F: &iFalse, + M: map[string]string{"k1": "success1"}, + } + expected := &url.URL{RawQuery: "c=true&f=false&j=2&m=%7B%27k1%27%3A%27success1%27%7D&r=red&s=one&s=two&s=three&ti=1&ti=2&ts=a&ts=b"} + actual, err := gophercloud.BuildQueryString(&opts) + if err != nil { + t.Errorf("Error building query string: %v", err) + } + th.CheckDeepEquals(t, expected, actual) + + opts = struct { + J int 
`q:"j"` + R string `q:"r" required:"true"` + C bool `q:"c"` + S []string `q:"s"` + TS []testVar `q:"ts"` + TI []int `q:"ti"` + F *bool `q:"f"` + M map[string]string `q:"m"` + }{ + J: 2, + C: true, + } + _, err = gophercloud.BuildQueryString(&opts) + if err == nil { + t.Errorf("Expected error: 'Required field not set'") + } + th.CheckDeepEquals(t, expected, actual) + + _, err = gophercloud.BuildQueryString(map[string]interface{}{"Number": 4}) + if err == nil { + t.Errorf("Expected error: 'Options type is not a struct'") + } +} + +func TestBuildHeaders(t *testing.T) { + testStruct := struct { + Accept string `h:"Accept"` + Num int `h:"Number" required:"true"` + Style bool `h:"Style"` + }{ + Accept: "application/json", + Num: 4, + Style: true, + } + expected := map[string]string{"Accept": "application/json", "Number": "4", "Style": "true"} + actual, err := gophercloud.BuildHeaders(&testStruct) + th.CheckNoErr(t, err) + th.CheckDeepEquals(t, expected, actual) + + testStruct.Num = 0 + _, err = gophercloud.BuildHeaders(&testStruct) + if err == nil { + t.Errorf("Expected error: 'Required header not set'") + } + + _, err = gophercloud.BuildHeaders(map[string]interface{}{"Number": 4}) + if err == nil { + t.Errorf("Expected error: 'Options type is not a struct'") + } +} + +func TestQueriesAreEscaped(t *testing.T) { + type foo struct { + Name string `q:"something"` + Shape string `q:"else"` + } + + expected := &url.URL{RawQuery: "else=Triangl+e&something=blah%2B%3F%21%21foo"} + + actual, err := gophercloud.BuildQueryString(foo{Name: "blah+?!!foo", Shape: "Triangl e"}) + th.AssertNoErr(t, err) + + th.AssertDeepEquals(t, expected, actual) +} + +func TestBuildRequestBody(t *testing.T) { + type PasswordCredentials struct { + Username string `json:"username" required:"true"` + Password string `json:"password" required:"true"` + } + + type TokenCredentials struct { + ID string `json:"id,omitempty" required:"true"` + } + + type orFields struct { + Filler int `json:"filler,omitempty"` + F1 int `json:"f1,omitempty" or:"F2"` + F2 int `json:"f2,omitempty" or:"F1"` + } + + // AuthOptions wraps a gophercloud AuthOptions in order to adhere to the AuthOptionsBuilder + // interface. + type AuthOptions struct { + PasswordCredentials *PasswordCredentials `json:"passwordCredentials,omitempty" xor:"TokenCredentials"` + + // The TenantID and TenantName fields are optional for the Identity V2 API. + // Some providers allow you to specify a TenantName instead of the TenantId. + // Some require both. Your provider's authentication policies will determine + // how these fields influence authentication. + TenantID string `json:"tenantId,omitempty"` + TenantName string `json:"tenantName,omitempty"` + + // TokenCredentials allows users to authenticate (possibly as another user) with an + // authentication token ID. 
+ TokenCredentials *TokenCredentials `json:"token,omitempty" xor:"PasswordCredentials"` + + OrFields *orFields `json:"or_fields,omitempty"` + } + + var successCases = []struct { + opts AuthOptions + expected map[string]interface{} + }{ + { + AuthOptions{ + PasswordCredentials: &PasswordCredentials{ + Username: "me", + Password: "swordfish", + }, + }, + map[string]interface{}{ + "auth": map[string]interface{}{ + "passwordCredentials": map[string]interface{}{ + "password": "swordfish", + "username": "me", + }, + }, + }, + }, + { + AuthOptions{ + TokenCredentials: &TokenCredentials{ + ID: "1234567", + }, + }, + map[string]interface{}{ + "auth": map[string]interface{}{ + "token": map[string]interface{}{ + "id": "1234567", + }, + }, + }, + }, + } + + for _, successCase := range successCases { + actual, err := gophercloud.BuildRequestBody(successCase.opts, "auth") + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, successCase.expected, actual) + } + + var failCases = []struct { + opts AuthOptions + expected error + }{ + { + AuthOptions{ + TenantID: "987654321", + TenantName: "me", + }, + gophercloud.ErrMissingInput{}, + }, + { + AuthOptions{ + TokenCredentials: &TokenCredentials{ + ID: "1234567", + }, + PasswordCredentials: &PasswordCredentials{ + Username: "me", + Password: "swordfish", + }, + }, + gophercloud.ErrMissingInput{}, + }, + { + AuthOptions{ + PasswordCredentials: &PasswordCredentials{ + Password: "swordfish", + }, + }, + gophercloud.ErrMissingInput{}, + }, + { + AuthOptions{ + PasswordCredentials: &PasswordCredentials{ + Username: "me", + Password: "swordfish", + }, + OrFields: &orFields{ + Filler: 2, + }, + }, + gophercloud.ErrMissingInput{}, + }, + } + + for _, failCase := range failCases { + _, err := gophercloud.BuildRequestBody(failCase.opts, "auth") + th.AssertDeepEquals(t, reflect.TypeOf(failCase.expected), reflect.TypeOf(err)) + } + + createdAt := time.Date(2018, 1, 4, 10, 00, 12, 0, time.UTC) + var complexFields = struct { + Username string `json:"username" required:"true"` + CreatedAt *time.Time `json:"-"` + }{ + Username: "jdoe", + CreatedAt: &createdAt, + } + + expectedComplexFields := map[string]interface{}{ + "username": "jdoe", + } + + actual, err := gophercloud.BuildRequestBody(complexFields, "") + th.AssertNoErr(t, err) + th.AssertDeepEquals(t, expectedComplexFields, actual) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/testing/provider_client_test.go b/vendor/github.com/gophercloud/gophercloud/testing/provider_client_test.go new file mode 100644 index 000000000000..1c6da1b927c7 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/provider_client_test.go @@ -0,0 +1,156 @@ +package testing + +import ( + "fmt" + "io/ioutil" + "net/http" + "reflect" + "sync" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + th "github.com/gophercloud/gophercloud/testhelper" + "github.com/gophercloud/gophercloud/testhelper/client" +) + +func TestAuthenticatedHeaders(t *testing.T) { + p := &gophercloud.ProviderClient{ + TokenID: "1234", + } + expected := map[string]string{"X-Auth-Token": "1234"} + actual := p.AuthenticatedHeaders() + th.CheckDeepEquals(t, expected, actual) +} + +func TestUserAgent(t *testing.T) { + p := &gophercloud.ProviderClient{} + + p.UserAgent.Prepend("custom-user-agent/2.4.0") + expected := "custom-user-agent/2.4.0 gophercloud/2.0.0" + actual := p.UserAgent.Join() + th.CheckEquals(t, expected, actual) + + p.UserAgent.Prepend("another-custom-user-agent/0.3.0", "a-third-ua/5.9.0") + expected = 
"another-custom-user-agent/0.3.0 a-third-ua/5.9.0 custom-user-agent/2.4.0 gophercloud/2.0.0" + actual = p.UserAgent.Join() + th.CheckEquals(t, expected, actual) + + p.UserAgent = gophercloud.UserAgent{} + expected = "gophercloud/2.0.0" + actual = p.UserAgent.Join() + th.CheckEquals(t, expected, actual) +} + +func TestConcurrentReauth(t *testing.T) { + var info = struct { + numreauths int + mut *sync.RWMutex + }{ + 0, + new(sync.RWMutex), + } + + numconc := 20 + + prereauthTok := client.TokenID + postreauthTok := "12345678" + + p := new(gophercloud.ProviderClient) + p.UseTokenLock() + p.SetToken(prereauthTok) + p.ReauthFunc = func() error { + time.Sleep(1 * time.Second) + p.AuthenticatedHeaders() + info.mut.Lock() + info.numreauths++ + info.mut.Unlock() + p.TokenID = postreauthTok + return nil + } + + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/route", func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Auth-Token") != postreauthTok { + w.WriteHeader(http.StatusUnauthorized) + return + } + info.mut.RLock() + hasReauthed := info.numreauths != 0 + info.mut.RUnlock() + + if hasReauthed { + th.CheckEquals(t, p.Token(), postreauthTok) + } + + w.Header().Add("Content-Type", "application/json") + fmt.Fprintf(w, `{}`) + }) + + wg := new(sync.WaitGroup) + reqopts := new(gophercloud.RequestOpts) + reqopts.MoreHeaders = map[string]string{ + "X-Auth-Token": prereauthTok, + } + + for i := 0; i < numconc; i++ { + wg.Add(1) + go func() { + defer wg.Done() + resp, err := p.Request("GET", fmt.Sprintf("%s/route", th.Endpoint()), reqopts) + th.CheckNoErr(t, err) + if resp == nil { + t.Errorf("got a nil response") + return + } + if resp.Body == nil { + t.Errorf("response body was nil") + return + } + defer resp.Body.Close() + actual, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Errorf("error reading response body: %s", err) + return + } + th.CheckByteArrayEquals(t, []byte(`{}`), actual) + }() + } + + wg.Wait() + + th.AssertEquals(t, 1, info.numreauths) +} + +func TestReauthEndLoop(t *testing.T) { + + p := new(gophercloud.ProviderClient) + p.UseTokenLock() + p.SetToken(client.TokenID) + p.ReauthFunc = func() error { + // Reauth func is working and returns no error + return nil + } + + th.SetupHTTP() + defer th.TeardownHTTP() + + th.Mux.HandleFunc("/route", func(w http.ResponseWriter, r *http.Request) { + // route always return 401 + w.WriteHeader(http.StatusUnauthorized) + return + }) + + reqopts := new(gophercloud.RequestOpts) + _, err := p.Request("GET", fmt.Sprintf("%s/route", th.Endpoint()), reqopts) + if err == nil { + t.Errorf("request ends with a nil error") + return + } + + if reflect.TypeOf(err) != reflect.TypeOf(&gophercloud.ErrErrorAfterReauthentication{}) { + t.Errorf("error is not an ErrErrorAfterReauthentication") + } +} diff --git a/vendor/github.com/gophercloud/gophercloud/testing/results_test.go b/vendor/github.com/gophercloud/gophercloud/testing/results_test.go new file mode 100644 index 000000000000..ddcb1322a4c1 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/results_test.go @@ -0,0 +1,208 @@ +package testing + +import ( + "encoding/json" + "testing" + + "github.com/gophercloud/gophercloud" + th "github.com/gophercloud/gophercloud/testhelper" +) + +var singleResponse = ` +{ + "person": { + "name": "Bill", + "email": "bill@example.com", + "location": "Canada" + } +} +` + +var multiResponse = ` +{ + "people": [ + { + "name": "Bill", + "email": "bill@example.com", + "location": "Canada" + }, + { + "name": "Ted", + "email": 
"ted@example.com", + "location": "Mexico" + } + ] +} +` + +type TestPerson struct { + Name string `json:"-"` + Email string `json:"email"` +} + +func (r *TestPerson) UnmarshalJSON(b []byte) error { + type tmp TestPerson + var s struct { + tmp + Name string `json:"name"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = TestPerson(s.tmp) + r.Name = s.Name + " unmarshalled" + + return nil +} + +type TestPersonExt struct { + Location string `json:"-"` +} + +func (r *TestPersonExt) UnmarshalJSON(b []byte) error { + type tmp TestPersonExt + var s struct { + tmp + Location string `json:"location"` + } + + err := json.Unmarshal(b, &s) + if err != nil { + return err + } + + *r = TestPersonExt(s.tmp) + r.Location = s.Location + " unmarshalled" + + return nil +} + +type TestPersonWithExtensions struct { + TestPerson + TestPersonExt +} + +type TestPersonWithExtensionsNamed struct { + TestPerson TestPerson + TestPersonExt TestPersonExt +} + +// TestUnmarshalAnonymousStruct tests if UnmarshalJSON is called on each +// of the anonymous structs contained in an overarching struct. +func TestUnmarshalAnonymousStructs(t *testing.T) { + var actual TestPersonWithExtensions + + var dejson interface{} + sejson := []byte(singleResponse) + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatal(err) + } + + var singleResult = gophercloud.Result{ + Body: dejson, + } + + err = singleResult.ExtractIntoStructPtr(&actual, "person") + th.AssertNoErr(t, err) + + th.AssertEquals(t, "Bill unmarshalled", actual.Name) + th.AssertEquals(t, "Canada unmarshalled", actual.Location) +} + +// TestUnmarshalSliceofAnonymousStructs tests if UnmarshalJSON is called on each +// of the anonymous structs contained in an overarching struct slice. +func TestUnmarshalSliceOfAnonymousStructs(t *testing.T) { + var actual []TestPersonWithExtensions + + var dejson interface{} + sejson := []byte(multiResponse) + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatal(err) + } + + var multiResult = gophercloud.Result{ + Body: dejson, + } + + err = multiResult.ExtractIntoSlicePtr(&actual, "people") + th.AssertNoErr(t, err) + + th.AssertEquals(t, "Bill unmarshalled", actual[0].Name) + th.AssertEquals(t, "Canada unmarshalled", actual[0].Location) + th.AssertEquals(t, "Ted unmarshalled", actual[1].Name) + th.AssertEquals(t, "Mexico unmarshalled", actual[1].Location) +} + +// TestUnmarshalSliceOfStruct tests if extracting results from a "normal" +// struct still works correctly. +func TestUnmarshalSliceofStruct(t *testing.T) { + var actual []TestPerson + + var dejson interface{} + sejson := []byte(multiResponse) + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatal(err) + } + + var multiResult = gophercloud.Result{ + Body: dejson, + } + + err = multiResult.ExtractIntoSlicePtr(&actual, "people") + th.AssertNoErr(t, err) + + th.AssertEquals(t, "Bill unmarshalled", actual[0].Name) + th.AssertEquals(t, "Ted unmarshalled", actual[1].Name) +} + +// TestUnmarshalNamedStruct tests if the result is empty. 
+func TestUnmarshalNamedStructs(t *testing.T) { + var actual TestPersonWithExtensionsNamed + + var dejson interface{} + sejson := []byte(singleResponse) + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatal(err) + } + + var singleResult = gophercloud.Result{ + Body: dejson, + } + + err = singleResult.ExtractIntoStructPtr(&actual, "person") + th.AssertNoErr(t, err) + + th.AssertEquals(t, "", actual.TestPerson.Name) + th.AssertEquals(t, "", actual.TestPersonExt.Location) +} + +// TestUnmarshalSliceofNamedStructs tests if the result is empty. +func TestUnmarshalSliceOfNamedStructs(t *testing.T) { + var actual []TestPersonWithExtensionsNamed + + var dejson interface{} + sejson := []byte(multiResponse) + err := json.Unmarshal(sejson, &dejson) + if err != nil { + t.Fatal(err) + } + + var multiResult = gophercloud.Result{ + Body: dejson, + } + + err = multiResult.ExtractIntoSlicePtr(&actual, "people") + th.AssertNoErr(t, err) + + th.AssertEquals(t, "", actual[0].TestPerson.Name) + th.AssertEquals(t, "", actual[0].TestPersonExt.Location) + th.AssertEquals(t, "", actual[1].TestPerson.Name) + th.AssertEquals(t, "", actual[1].TestPersonExt.Location) +} diff --git a/vendor/github.com/gophercloud/gophercloud/testing/service_client_test.go b/vendor/github.com/gophercloud/gophercloud/testing/service_client_test.go new file mode 100644 index 000000000000..034fdc1d936c --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/service_client_test.go @@ -0,0 +1,34 @@ +package testing + +import ( + "fmt" + "net/http" + "testing" + + "github.com/gophercloud/gophercloud" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestServiceURL(t *testing.T) { + c := &gophercloud.ServiceClient{Endpoint: "http://123.45.67.8/"} + expected := "http://123.45.67.8/more/parts/here" + actual := c.ServiceURL("more", "parts", "here") + th.CheckEquals(t, expected, actual) +} + +func TestMoreHeaders(t *testing.T) { + th.SetupHTTP() + defer th.TeardownHTTP() + th.Mux.HandleFunc("/route", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + + c := new(gophercloud.ServiceClient) + c.MoreHeaders = map[string]string{ + "custom": "header", + } + c.ProviderClient = new(gophercloud.ProviderClient) + resp, err := c.Get(fmt.Sprintf("%s/route", th.Endpoint()), nil, nil) + th.AssertNoErr(t, err) + th.AssertEquals(t, resp.Request.Header.Get("custom"), "header") +} diff --git a/vendor/github.com/gophercloud/gophercloud/testing/util_test.go b/vendor/github.com/gophercloud/gophercloud/testing/util_test.go new file mode 100644 index 000000000000..ae3e448fa9c0 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/testing/util_test.go @@ -0,0 +1,122 @@ +package testing + +import ( + "errors" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/gophercloud/gophercloud" + th "github.com/gophercloud/gophercloud/testhelper" +) + +func TestWaitFor(t *testing.T) { + err := gophercloud.WaitFor(2, func() (bool, error) { + return true, nil + }) + th.CheckNoErr(t, err) +} + +func TestWaitForTimeout(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + err := gophercloud.WaitFor(1, func() (bool, error) { + return false, nil + }) + th.AssertEquals(t, "A timeout occurred", err.Error()) +} + +func TestWaitForError(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + err := gophercloud.WaitFor(2, func() (bool, error) { + return false, errors.New("Error has occurred") + }) + th.AssertEquals(t, 
"Error has occurred", err.Error()) +} + +func TestWaitForPredicateExceed(t *testing.T) { + if testing.Short() { + t.Skip("skipping test in short mode.") + } + + err := gophercloud.WaitFor(1, func() (bool, error) { + time.Sleep(4 * time.Second) + return false, errors.New("Just wasting time") + }) + th.AssertEquals(t, "A timeout occurred", err.Error()) +} + +func TestNormalizeURL(t *testing.T) { + urls := []string{ + "NoSlashAtEnd", + "SlashAtEnd/", + } + expected := []string{ + "NoSlashAtEnd/", + "SlashAtEnd/", + } + for i := 0; i < len(expected); i++ { + th.CheckEquals(t, expected[i], gophercloud.NormalizeURL(urls[i])) + } + +} + +func TestNormalizePathURL(t *testing.T) { + baseDir, _ := os.Getwd() + + rawPath := "template.yaml" + basePath, _ := filepath.Abs(".") + result, _ := gophercloud.NormalizePathURL(basePath, rawPath) + expected := strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "template.yaml"}, "/") + th.CheckEquals(t, expected, result) + + rawPath = "http://www.google.com" + basePath, _ = filepath.Abs(".") + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath, _ = filepath.Abs(".") + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "very/nested/file.yaml"}, "/") + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath = "http://www.google.com" + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com/very/nested/file.yaml" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml/" + basePath = "http://www.google.com/" + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com/very/nested/file.yaml" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath = "http://www.google.com/even/more" + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = "http://www.google.com/even/more/very/nested/file.yaml" + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml" + basePath = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more"}, "/") + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more/very/nested/file.yaml"}, "/") + th.CheckEquals(t, expected, result) + + rawPath = "very/nested/file.yaml/" + basePath = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more"}, "/") + result, _ = gophercloud.NormalizePathURL(basePath, rawPath) + expected = strings.Join([]string{"file:/", filepath.ToSlash(baseDir), "only/file/even/more/very/nested/file.yaml"}, "/") + th.CheckEquals(t, expected, result) + +} diff --git a/vendor/github.com/gophercloud/gophercloud/util.go b/vendor/github.com/gophercloud/gophercloud/util.go new file mode 100644 index 000000000000..68f9a5d3eca5 --- /dev/null +++ b/vendor/github.com/gophercloud/gophercloud/util.go @@ -0,0 +1,102 @@ +package gophercloud + +import ( + "fmt" + "net/url" + "path/filepath" + "strings" + "time" +) + +// WaitFor polls a predicate function, once per second, up to a timeout limit. +// This is useful to wait for a resource to transition to a certain state. +// To handle situations when the predicate might hang indefinitely, the +// predicate will be prematurely cancelled after the timeout. 
+// Resource packages will wrap this in a more convenient function that's +// specific to a certain resource, but it can also be useful on its own. +func WaitFor(timeout int, predicate func() (bool, error)) error { + type WaitForResult struct { + Success bool + Error error + } + + start := time.Now().Unix() + + for { + // If a timeout is set, and that's been exceeded, shut it down. + if timeout >= 0 && time.Now().Unix()-start >= int64(timeout) { + return fmt.Errorf("A timeout occurred") + } + + time.Sleep(1 * time.Second) + + var result WaitForResult + ch := make(chan bool, 1) + go func() { + defer close(ch) + satisfied, err := predicate() + result.Success = satisfied + result.Error = err + }() + + select { + case <-ch: + if result.Error != nil { + return result.Error + } + if result.Success { + return nil + } + // If the predicate has not finished by the timeout, cancel it. + case <-time.After(time.Duration(timeout) * time.Second): + return fmt.Errorf("A timeout occurred") + } + } +} + +// NormalizeURL is an internal function to be used by provider clients. +// +// It ensures that each endpoint URL has a closing `/`, as expected by +// ServiceClient's methods. +func NormalizeURL(url string) string { + if !strings.HasSuffix(url, "/") { + return url + "/" + } + return url +} + +// NormalizePathURL is used to convert rawPath to a fqdn, using basePath as +// a reference in the filesystem, if necessary. basePath is assumed to contain +// either '.' when first used, or the file:// type fqdn of the parent resource. +// e.g. myFavScript.yaml => file://opt/lib/myFavScript.yaml +func NormalizePathURL(basePath, rawPath string) (string, error) { + u, err := url.Parse(rawPath) + if err != nil { + return "", err + } + // if a scheme is defined, it must be a fqdn already + if u.Scheme != "" { + return u.String(), nil + } + // if basePath is a url, then child resources are assumed to be relative to it + bu, err := url.Parse(basePath) + if err != nil { + return "", err + } + var basePathSys, absPathSys string + if bu.Scheme != "" { + basePathSys = filepath.FromSlash(bu.Path) + absPathSys = filepath.Join(basePathSys, rawPath) + bu.Path = filepath.ToSlash(absPathSys) + return bu.String(), nil + } + + absPathSys = filepath.Join(basePath, rawPath) + u.Path = filepath.ToSlash(absPathSys) + if err != nil { + return "", err + } + u.Scheme = "file" + return u.String(), nil + +} diff --git a/vendor/github.com/gregjones/httpcache/LICENSE.txt b/vendor/github.com/gregjones/httpcache/LICENSE.txt new file mode 100644 index 000000000000..81316beb0cbd --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/LICENSE.txt @@ -0,0 +1,7 @@ +Copyright © 2012 Greg Jones (greg.jones@gmail.com) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gregjones/httpcache/README.md b/vendor/github.com/gregjones/httpcache/README.md new file mode 100644 index 000000000000..09c9e7c173a0 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/README.md @@ -0,0 +1,25 @@ +httpcache +========= + +[![Build Status](https://travis-ci.org/gregjones/httpcache.svg?branch=master)](https://travis-ci.org/gregjones/httpcache) [![GoDoc](https://godoc.org/github.com/gregjones/httpcache?status.svg)](https://godoc.org/github.com/gregjones/httpcache) + +Package httpcache provides a http.RoundTripper implementation that works as a mostly [RFC 7234](https://tools.ietf.org/html/rfc7234) compliant cache for http responses. + +It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client and not for a shared proxy). + +Cache Backends +-------------- + +- The built-in 'memory' cache stores responses in an in-memory map. +- [`github.com/gregjones/httpcache/diskcache`](https://github.com/gregjones/httpcache/tree/master/diskcache) provides a filesystem-backed cache using the [diskv](https://github.com/peterbourgon/diskv) library. +- [`github.com/gregjones/httpcache/memcache`](https://github.com/gregjones/httpcache/tree/master/memcache) provides memcache implementations, for both App Engine and 'normal' memcache servers. +- [`sourcegraph.com/sourcegraph/s3cache`](https://sourcegraph.com/github.com/sourcegraph/s3cache) uses Amazon S3 for storage. +- [`github.com/gregjones/httpcache/leveldbcache`](https://github.com/gregjones/httpcache/tree/master/leveldbcache) provides a filesystem-backed cache using [leveldb](https://github.com/syndtr/goleveldb/leveldb). +- [`github.com/die-net/lrucache`](https://github.com/die-net/lrucache) provides an in-memory cache that will evict least-recently used entries. +- [`github.com/die-net/lrucache/twotier`](https://github.com/die-net/lrucache/tree/master/twotier) allows caches to be combined, for example to use lrucache above with a persistent disk-cache. +- [`github.com/birkelund/boltdbcache`](https://github.com/birkelund/boltdbcache) provides a BoltDB implementation (based on the [bbolt](https://github.com/coreos/bbolt) fork). 
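+
+Example
+-------
+
+A minimal usage sketch with the built-in in-memory cache; the URL is a placeholder, and whether the second request is actually a cache hit depends on the `Cache-Control`/`Expires` headers the server returns:
+
+```go
+package main
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"github.com/gregjones/httpcache"
+)
+
+func main() {
+	// NewMemoryCacheTransport wires the in-memory cache into a Transport;
+	// Client wraps it in a regular *http.Client.
+	client := httpcache.NewMemoryCacheTransport().Client()
+
+	for i := 0; i < 2; i++ {
+		resp, err := client.Get("https://example.com/") // placeholder URL
+		if err != nil {
+			fmt.Println("request failed:", err)
+			return
+		}
+		// Read to EOF so the response body gets written to the cache.
+		ioutil.ReadAll(resp.Body)
+		resp.Body.Close()
+		// Responses served from the cache carry the X-From-Cache header.
+		fmt.Println("from cache:", resp.Header.Get(httpcache.XFromCache) == "1")
+	}
+}
+```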
+ +License +------- + +- [MIT License](LICENSE.txt) diff --git a/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go new file mode 100644 index 000000000000..42e3129d8237 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/diskcache/diskcache.go @@ -0,0 +1,61 @@ +// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package +// to supplement an in-memory map with persistent storage +// +package diskcache + +import ( + "bytes" + "crypto/md5" + "encoding/hex" + "github.com/peterbourgon/diskv" + "io" +) + +// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage +type Cache struct { + d *diskv.Diskv +} + +// Get returns the response corresponding to key if present +func (c *Cache) Get(key string) (resp []byte, ok bool) { + key = keyToFilename(key) + resp, err := c.d.Read(key) + if err != nil { + return []byte{}, false + } + return resp, true +} + +// Set saves a response to the cache as key +func (c *Cache) Set(key string, resp []byte) { + key = keyToFilename(key) + c.d.WriteStream(key, bytes.NewReader(resp), true) +} + +// Delete removes the response with key from the cache +func (c *Cache) Delete(key string) { + key = keyToFilename(key) + c.d.Erase(key) +} + +func keyToFilename(key string) string { + h := md5.New() + io.WriteString(h, key) + return hex.EncodeToString(h.Sum(nil)) +} + +// New returns a new Cache that will store files in basePath +func New(basePath string) *Cache { + return &Cache{ + d: diskv.New(diskv.Options{ + BasePath: basePath, + CacheSizeMax: 100 * 1024 * 1024, // 100MB + }), + } +} + +// NewWithDiskv returns a new Cache using the provided Diskv as underlying +// storage. +func NewWithDiskv(d *diskv.Diskv) *Cache { + return &Cache{d} +} diff --git a/vendor/github.com/gregjones/httpcache/httpcache.go b/vendor/github.com/gregjones/httpcache/httpcache.go new file mode 100644 index 000000000000..f6a2ec4a53e8 --- /dev/null +++ b/vendor/github.com/gregjones/httpcache/httpcache.go @@ -0,0 +1,551 @@ +// Package httpcache provides a http.RoundTripper implementation that works as a +// mostly RFC-compliant cache for http responses. +// +// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client +// and not for a shared proxy). +// +package httpcache + +import ( + "bufio" + "bytes" + "errors" + "io" + "io/ioutil" + "net/http" + "net/http/httputil" + "strings" + "sync" + "time" +) + +const ( + stale = iota + fresh + transparent + // XFromCache is the header added to responses that are returned from the cache + XFromCache = "X-From-Cache" +) + +// A Cache interface is used by the Transport to store and retrieve responses. +type Cache interface { + // Get returns the []byte representation of a cached response and a bool + // set to true if the value isn't empty + Get(key string) (responseBytes []byte, ok bool) + // Set stores the []byte representation of a response against a key + Set(key string, responseBytes []byte) + // Delete removes the value associated with the key + Delete(key string) +} + +// cacheKey returns the cache key for req. +func cacheKey(req *http.Request) string { + if req.Method == http.MethodGet { + return req.URL.String() + } else { + return req.Method + " " + req.URL.String() + } +} + +// CachedResponse returns the cached http.Response for req if present, and nil +// otherwise. 
+func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
+	cachedVal, ok := c.Get(cacheKey(req))
+	if !ok {
+		return
+	}
+
+	b := bytes.NewBuffer(cachedVal)
+	return http.ReadResponse(bufio.NewReader(b), req)
+}
+
+// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
+type MemoryCache struct {
+	mu    sync.RWMutex
+	items map[string][]byte
+}
+
+// Get returns the []byte representation of the response and true if present, false if not
+func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
+	c.mu.RLock()
+	resp, ok = c.items[key]
+	c.mu.RUnlock()
+	return resp, ok
+}
+
+// Set saves response resp to the cache with key
+func (c *MemoryCache) Set(key string, resp []byte) {
+	c.mu.Lock()
+	c.items[key] = resp
+	c.mu.Unlock()
+}
+
+// Delete removes key from the cache
+func (c *MemoryCache) Delete(key string) {
+	c.mu.Lock()
+	delete(c.items, key)
+	c.mu.Unlock()
+}
+
+// NewMemoryCache returns a new Cache that will store items in an in-memory map
+func NewMemoryCache() *MemoryCache {
+	c := &MemoryCache{items: map[string][]byte{}}
+	return c
+}
+
+// Transport is an implementation of http.RoundTripper that will return values from a cache
+// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
+// to repeated requests allowing servers to return 304 / Not Modified
+type Transport struct {
+	// The RoundTripper interface actually used to make requests
+	// If nil, http.DefaultTransport is used
+	Transport http.RoundTripper
+	Cache     Cache
+	// If true, responses returned from the cache will be given an extra header, X-From-Cache
+	MarkCachedResponses bool
+}
+
+// NewTransport returns a new Transport with the
+// provided Cache implementation and MarkCachedResponses set to true
+func NewTransport(c Cache) *Transport {
+	return &Transport{Cache: c, MarkCachedResponses: true}
+}
+
+// Client returns an *http.Client that caches responses.
+func (t *Transport) Client() *http.Client {
+	return &http.Client{Transport: t}
+}
+
+// varyMatches will return false unless all of the cached values for the headers listed in Vary
+// match the new request
+func varyMatches(cachedResp *http.Response, req *http.Request) bool {
+	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
+		header = http.CanonicalHeaderKey(header)
+		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
+			return false
+		}
+	}
+	return true
+}
+
+// RoundTrip takes a Request and returns a Response
+//
+// If there is a fresh Response already in cache, then it will be returned without connecting to
+// the server.
+//
+// If there is a stale Response, then any validators it contains will be set on the new request
+// to give the server a chance to respond with NotModified. If this happens, then the cached Response
+// will be returned.
+func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) { + cacheKey := cacheKey(req) + cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == "" + var cachedResp *http.Response + if cacheable { + cachedResp, err = CachedResponse(t.Cache, req) + } else { + // Need to invalidate an existing value + t.Cache.Delete(cacheKey) + } + + transport := t.Transport + if transport == nil { + transport = http.DefaultTransport + } + + if cacheable && cachedResp != nil && err == nil { + if t.MarkCachedResponses { + cachedResp.Header.Set(XFromCache, "1") + } + + if varyMatches(cachedResp, req) { + // Can only use cached value if the new request doesn't Vary significantly + freshness := getFreshness(cachedResp.Header, req.Header) + if freshness == fresh { + return cachedResp, nil + } + + if freshness == stale { + var req2 *http.Request + // Add validators if caller hasn't already done so + etag := cachedResp.Header.Get("etag") + if etag != "" && req.Header.Get("etag") == "" { + req2 = cloneRequest(req) + req2.Header.Set("if-none-match", etag) + } + lastModified := cachedResp.Header.Get("last-modified") + if lastModified != "" && req.Header.Get("last-modified") == "" { + if req2 == nil { + req2 = cloneRequest(req) + } + req2.Header.Set("if-modified-since", lastModified) + } + if req2 != nil { + req = req2 + } + } + } + + resp, err = transport.RoundTrip(req) + if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified { + // Replace the 304 response with the one from cache, but update with some new headers + endToEndHeaders := getEndToEndHeaders(resp.Header) + for _, header := range endToEndHeaders { + cachedResp.Header[header] = resp.Header[header] + } + resp = cachedResp + } else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) && + req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) { + // In case of transport failure and stale-if-error activated, returns cached content + // when available + return cachedResp, nil + } else { + if err != nil || resp.StatusCode != http.StatusOK { + t.Cache.Delete(cacheKey) + } + if err != nil { + return nil, err + } + } + } else { + reqCacheControl := parseCacheControl(req.Header) + if _, ok := reqCacheControl["only-if-cached"]; ok { + resp = newGatewayTimeoutResponse(req) + } else { + resp, err = transport.RoundTrip(req) + if err != nil { + return nil, err + } + } + } + + if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) { + for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") { + varyKey = http.CanonicalHeaderKey(varyKey) + fakeHeader := "X-Varied-" + varyKey + reqValue := req.Header.Get(varyKey) + if reqValue != "" { + resp.Header.Set(fakeHeader, reqValue) + } + } + switch req.Method { + case "GET": + // Delay caching until EOF is reached. + resp.Body = &cachingReadCloser{ + R: resp.Body, + OnEOF: func(r io.Reader) { + resp := *resp + resp.Body = ioutil.NopCloser(r) + respBytes, err := httputil.DumpResponse(&resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + }, + } + default: + respBytes, err := httputil.DumpResponse(resp, true) + if err == nil { + t.Cache.Set(cacheKey, respBytes) + } + } + } else { + t.Cache.Delete(cacheKey) + } + return resp, nil +} + +// ErrNoDateHeader indicates that the HTTP headers contained no Date header. +var ErrNoDateHeader = errors.New("no Date header") + +// Date parses and returns the value of the Date header. 
+func Date(respHeaders http.Header) (date time.Time, err error) {
+	dateHeader := respHeaders.Get("date")
+	if dateHeader == "" {
+		err = ErrNoDateHeader
+		return
+	}
+
+	return time.Parse(time.RFC1123, dateHeader)
+}
+
+type realClock struct{}
+
+func (c *realClock) since(d time.Time) time.Duration {
+	return time.Since(d)
+}
+
+type timer interface {
+	since(d time.Time) time.Duration
+}
+
+var clock timer = &realClock{}
+
+// getFreshness will return one of fresh/stale/transparent based on the cache-control
+// values of the request and the response
+//
+// fresh indicates the response can be returned
+// stale indicates that the response needs validating before it is returned
+// transparent indicates the response should not be used to fulfil the request
+//
+// Because this is only a private cache, 'public' and 'private' in cache-control aren't
+// significant. Similarly, smax-age isn't used.
+func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
+	respCacheControl := parseCacheControl(respHeaders)
+	reqCacheControl := parseCacheControl(reqHeaders)
+	if _, ok := reqCacheControl["no-cache"]; ok {
+		return transparent
+	}
+	if _, ok := respCacheControl["no-cache"]; ok {
+		return stale
+	}
+	if _, ok := reqCacheControl["only-if-cached"]; ok {
+		return fresh
+	}
+
+	date, err := Date(respHeaders)
+	if err != nil {
+		return stale
+	}
+	currentAge := clock.since(date)
+
+	var lifetime time.Duration
+	var zeroDuration time.Duration
+
+	// If a response includes both an Expires header and a max-age directive,
+	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
+	if maxAge, ok := respCacheControl["max-age"]; ok {
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	} else {
+		expiresHeader := respHeaders.Get("Expires")
+		if expiresHeader != "" {
+			expires, err := time.Parse(time.RFC1123, expiresHeader)
+			if err != nil {
+				lifetime = zeroDuration
+			} else {
+				lifetime = expires.Sub(date)
+			}
+		}
+	}
+
+	if maxAge, ok := reqCacheControl["max-age"]; ok {
+		// the client is willing to accept a response whose age is no greater than the specified time in seconds
+		lifetime, err = time.ParseDuration(maxAge + "s")
+		if err != nil {
+			lifetime = zeroDuration
+		}
+	}
+	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
+		// the client wants a response that will still be fresh for at least the specified number of seconds.
+		minfreshDuration, err := time.ParseDuration(minfresh + "s")
+		if err == nil {
+			currentAge = time.Duration(currentAge + minfreshDuration)
+		}
+	}
+
+	if maxstale, ok := reqCacheControl["max-stale"]; ok {
+		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
+		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
+		// its expiration time by no more than the specified number of seconds.
+		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
+		//
+		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
+		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
+		// return-value available here.
+ if maxstale == "" { + return fresh + } + maxstaleDuration, err := time.ParseDuration(maxstale + "s") + if err == nil { + currentAge = time.Duration(currentAge - maxstaleDuration) + } + } + + if lifetime > currentAge { + return fresh + } + + return stale +} + +// Returns true if either the request or the response includes the stale-if-error +// cache control extension: https://tools.ietf.org/html/rfc5861 +func canStaleOnError(respHeaders, reqHeaders http.Header) bool { + respCacheControl := parseCacheControl(respHeaders) + reqCacheControl := parseCacheControl(reqHeaders) + + var err error + lifetime := time.Duration(-1) + + if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok { + if staleMaxAge != "" { + lifetime, err = time.ParseDuration(staleMaxAge + "s") + if err != nil { + return false + } + } else { + return true + } + } + + if lifetime >= 0 { + date, err := Date(respHeaders) + if err != nil { + return false + } + currentAge := clock.since(date) + if lifetime > currentAge { + return true + } + } + + return false +} + +func getEndToEndHeaders(respHeaders http.Header) []string { + // These headers are always hop-by-hop + hopByHopHeaders := map[string]struct{}{ + "Connection": struct{}{}, + "Keep-Alive": struct{}{}, + "Proxy-Authenticate": struct{}{}, + "Proxy-Authorization": struct{}{}, + "Te": struct{}{}, + "Trailers": struct{}{}, + "Transfer-Encoding": struct{}{}, + "Upgrade": struct{}{}, + } + + for _, extra := range strings.Split(respHeaders.Get("connection"), ",") { + // any header listed in connection, if present, is also considered hop-by-hop + if strings.Trim(extra, " ") != "" { + hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{} + } + } + endToEndHeaders := []string{} + for respHeader, _ := range respHeaders { + if _, ok := hopByHopHeaders[respHeader]; !ok { + endToEndHeaders = append(endToEndHeaders, respHeader) + } + } + return endToEndHeaders +} + +func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) { + if _, ok := respCacheControl["no-store"]; ok { + return false + } + if _, ok := reqCacheControl["no-store"]; ok { + return false + } + return true +} + +func newGatewayTimeoutResponse(req *http.Request) *http.Response { + var braw bytes.Buffer + braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n") + resp, err := http.ReadResponse(bufio.NewReader(&braw), req) + if err != nil { + panic(err) + } + return resp +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. 
+// (This function copyright goauth2 authors: https://code.google.com/p/goauth2) +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header) + for k, s := range r.Header { + r2.Header[k] = s + } + return r2 +} + +type cacheControl map[string]string + +func parseCacheControl(headers http.Header) cacheControl { + cc := cacheControl{} + ccHeader := headers.Get("Cache-Control") + for _, part := range strings.Split(ccHeader, ",") { + part = strings.Trim(part, " ") + if part == "" { + continue + } + if strings.ContainsRune(part, '=') { + keyval := strings.Split(part, "=") + cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",") + } else { + cc[part] = "" + } + } + return cc +} + +// headerAllCommaSepValues returns all comma-separated values (each +// with whitespace trimmed) for header name in headers. According to +// Section 4.2 of the HTTP/1.1 spec +// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2), +// values from multiple occurrences of a header should be concatenated, if +// the header's value is a comma-separated list. +func headerAllCommaSepValues(headers http.Header, name string) []string { + var vals []string + for _, val := range headers[http.CanonicalHeaderKey(name)] { + fields := strings.Split(val, ",") + for i, f := range fields { + fields[i] = strings.TrimSpace(f) + } + vals = append(vals, fields...) + } + return vals +} + +// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF +// handler with a full copy of the content read from R when EOF is +// reached. +type cachingReadCloser struct { + // Underlying ReadCloser. + R io.ReadCloser + // OnEOF is called with a copy of the content of R when EOF is reached. + OnEOF func(io.Reader) + + buf bytes.Buffer // buf stores a copy of the content of R. +} + +// Read reads the next len(p) bytes from R or until R is drained. The +// return value n is the number of bytes read. If R has no data to +// return, err is io.EOF and OnEOF is called with a full copy of what +// has been read so far. +func (r *cachingReadCloser) Read(p []byte) (n int, err error) { + n, err = r.R.Read(p) + r.buf.Write(p[:n]) + if err == io.EOF { + r.OnEOF(bytes.NewReader(r.buf.Bytes())) + } + return n, err +} + +func (r *cachingReadCloser) Close() error { + return r.R.Close() +} + +// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation +func NewMemoryCacheTransport() *Transport { + c := NewMemoryCache() + t := NewTransport(c) + return t +} diff --git a/vendor/github.com/hashicorp/go-discover/README.md b/vendor/github.com/hashicorp/go-discover/README.md index 7e61ba977bb7..ae87400fe9ef 100644 --- a/vendor/github.com/hashicorp/go-discover/README.md +++ b/vendor/github.com/hashicorp/go-discover/README.md @@ -33,9 +33,15 @@ function. * vSphere [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/vsphere/vsphere_discover.go#L148-L155) * Packet [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/packet/packet_discover.go#L25-L35) +The following providers are implemented in the go-discover/provider subdirectory +but aren't automatically registered. 
If you want to support these providers, +register them manually: + + * Kubernetes [Config options](https://github.com/hashicorp/go-discover/blob/master/provider/k8s/k8s_discover.go#L32-L51) + HashiCorp maintains acceptance tests that regularly allocate and run tests with real resources to verify the behavior of several of these providers. Those -currently are: Amazon AWS, Microsoft Azure, Google Cloud, DigitalOcean, Triton, Scaleway and AliBaba Cloud. +currently are: Amazon AWS, Microsoft Azure, Google Cloud, DigitalOcean, Triton, Scaleway, AliBaba Cloud, vSphere, and Packet.net. ### Config Example @@ -70,8 +76,11 @@ provider=triton account=testaccount url=https://us-sw-1.api.joyentcloud.com key_ # vSphere provider=vsphere category_name=consul-role tag_name=consul-server host=... user=... password=... insecure_ssl=[true|false] -# Packet +# Packet provider=packet auth_token=token project=uuid url=... address_type=... + +# Kubernetes +provider=k8s label_selector="app = consul-server" ``` ## Command Line Tool Usage @@ -118,6 +127,25 @@ cfg := "provider=aws region=eu-west-1 ..." addrs, err := d.Addrs(cfg, l) ``` +You can also add support for providers that aren't registered by default: + +```go +// Imports at top of file +import "github.com/hashicorp/go-discover/provider/k8s" + +// support discovery for all supported providers +d := discover.Discover{} + +// support discovery for AWS and GCE only +d := discover.Discover{ + Providers : map[string]discover.Provider{ + "k8s": &k8s.Provider{}, + } +} + +// ... +``` + For complete API documentation, see [GoDoc](https://godoc.org/github.com/hashicorp/go-discover). The configuration for the supported providers is documented in the diff --git a/vendor/github.com/hashicorp/go-discover/go.mod b/vendor/github.com/hashicorp/go-discover/go.mod new file mode 100644 index 000000000000..3cb6abe26cc7 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/go.mod @@ -0,0 +1,72 @@ +module github.com/hashicorp/go-discover + +require ( + cloud.google.com/go v0.26.0 // indirect + github.com/Azure/azure-sdk-for-go v16.0.0+incompatible + github.com/Azure/go-autorest v10.7.0+incompatible + github.com/Sirupsen/logrus v1.0.6 // indirect + github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af // indirect + github.com/aws/aws-sdk-go v1.15.24 + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/digitalocean/godo v1.1.1 + github.com/fsnotify/fsnotify v1.4.7 // indirect + github.com/ghodss/yaml v1.0.0 // indirect + github.com/gogo/protobuf v1.1.1 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/protobuf v1.2.0 // indirect + github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c // indirect + github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 // indirect + github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf // indirect + github.com/googleapis/gnostic v0.2.0 // indirect + github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca + github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c // indirect + github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect + github.com/hashicorp/go-multierror v1.0.0 + github.com/hpcloud/tail v1.0.0 // indirect + github.com/imdario/mergo v0.3.6 // indirect + github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da // indirect + github.com/joyent/triton-go 
v0.0.0-20180628001255-830d2b111e62 + github.com/json-iterator/go v1.1.5 // indirect + github.com/jtolds/gls v4.2.1+incompatible // indirect + github.com/mitchellh/go-homedir v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 + github.com/onsi/ginkgo v1.6.0 // indirect + github.com/onsi/gomega v1.4.1 // indirect + github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c + github.com/peterbourgon/diskv v2.0.1+incompatible // indirect + github.com/pkg/errors v0.8.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 // indirect + github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect + github.com/sirupsen/logrus v1.0.6 // indirect + github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf // indirect + github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a // indirect + github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d + github.com/spf13/pflag v1.0.2 // indirect + github.com/stretchr/testify v1.2.2 // indirect + github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9 // indirect + github.com/vmware/govmomi v0.18.0 + github.com/vmware/vic v1.4.1 + golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac // indirect + golang.org/x/net v0.0.0-20180826012351-8a410e7b638d // indirect + golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95 + golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654 // indirect + golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b // indirect + golang.org/x/text v0.3.0 // indirect + golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 // indirect + google.golang.org/api v0.0.0-20180829000535-087779f1d2c9 + google.golang.org/appengine v1.1.0 // indirect + gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect + gopkg.in/fsnotify.v1 v1.4.7 // indirect + gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect + gopkg.in/inf.v0 v0.9.1 // indirect + gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect + gopkg.in/yaml.v2 v2.2.1 // indirect + k8s.io/api v0.0.0-20180806132203-61b11ee65332 + k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f + k8s.io/client-go v8.0.0+incompatible +) diff --git a/vendor/github.com/hashicorp/go-discover/go.sum b/vendor/github.com/hashicorp/go-discover/go.sum new file mode 100644 index 000000000000..e379ec629ba8 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/go.sum @@ -0,0 +1,144 @@ +cloud.google.com/go v0.26.0 h1:e0WKqKTd5BnrG8aKH3J3h+QvEIQtSUcf2n5UZ5ZgLtQ= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/Azure/azure-sdk-for-go v16.0.0+incompatible h1:gr1qKY/Ll72VjFTZmaBwRK1yQHAxCnV25ekOKroc9ws= +github.com/Azure/azure-sdk-for-go v16.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= +github.com/Azure/go-autorest v10.7.0+incompatible h1:dB+dKSLGdJLEhU/FoZTSNSPMZuE5H4M5p5zgSct7qwM= +github.com/Azure/go-autorest v10.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= +github.com/Sirupsen/logrus v1.0.6 h1:HCAGQRk48dRVPA5Y+Yh0qdCSTzPOyU1tBJ7Q9YzotII= +github.com/Sirupsen/logrus v1.0.6/go.mod h1:rmk17hk6i8ZSAJkSDa7nOxamrG+SP4P0mm+DAvExv4U= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af h1:DBNMBMuMiWYu0b+8KMJuWmfCkcxl09JwdlqwDZZ6U14= +github.com/abdullin/seq v0.0.0-20160510034733-d5467c17e7af/go.mod 
h1:5Jv4cbFiHJMsVxt52+i0Ha45fjshj6wxYr1r19tB9bw= +github.com/aws/aws-sdk-go v1.15.24 h1:xLAdTA/ore6xdPAljzZRed7IGqQgC+nY+ERS5vaj4Ro= +github.com/aws/aws-sdk-go v1.15.24/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661 h1:lrWnAyy/F72MbxIxFUzKmcMCdt9Oi8RzpAxzTNQHD7o= +github.com/denverdino/aliyungo v0.0.0-20170926055100-d3308649c661/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/digitalocean/godo v1.1.1 h1:v0A7yF3xmKLjjdJGIeBbINfMufcrrRhqZsxuVQMoT+U= +github.com/digitalocean/godo v1.1.1/go.mod h1:h6faOIcZ8lWIwNQ+DN7b3CgX4Kwby5T+nbpNqkUIozU= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-ini/ini v1.25.4 h1:Mujh4R/dH6YL8bxuISne3xX2+qcQ9p0IxKAP6ExWoUo= +github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/gogo/protobuf v1.1.1 h1:72R+M5VuhED/KujmZVcIquuo8mBgX4oVda//DQb3PXo= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c h1:964Od4U6p2jUkFxvCydnIczKteheJEzHRToSGK3Bnlw= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135 h1:zLTLjkaOFEFIOxY5BWLFLwh+cL8vOBW4XJ2aqLE/Tf0= +github.com/google/go-querystring v0.0.0-20170111101155-53e6ce116135/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf h1:+RRA9JqSOZFfKrOeqr2z77+8R2RKyh8PG66dcu1V0ck= +github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= +github.com/googleapis/gnostic v0.2.0 h1:l6N3VoaVzTncYYW+9yOz2LJJammFZGBO13sqgEhpy9g= +github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= +github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca h1:wobTb8SE189AuxzEKClyYxiI4nUGWlpVtl13eLiFlOE= +github.com/gophercloud/gophercloud v0.0.0-20180828235145-f29afc2cceca/go.mod h1:3WdhXV3rUYy9p6AUW8d94kr+HS62Y4VL9mBnFxsD8q4= +github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c h1:16eHWuMGvCjSfgRJKqIzapE78onvvTbdi1rMkU00lZw= +github.com/gopherjs/gopherjs v0.0.0-20180825215210-0210a2f0f73c/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= 
+github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/go-multierror v1.0.0 h1:iVjPR7a6H0tWELX5NxNe7bYopibicUzc7uPribsnS6o= +github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= +github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da h1:FjHUJJ7oBW4G/9j1KzlHaXL09LyMVM9rupS39lncbXk= +github.com/jarcoal/httpmock v0.0.0-20180424175123-9c70cfe4a1da/go.mod h1:ks+b9deReOc7jgqp+e7LuFiCBH6Rm5hL32cLcEAArb4= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8 h1:12VvqtR6Aowv3l/EQUlocDHW2Cp4G9WJVH7uyH8QFJE= +github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62 h1:JHCT6xuyPUrbbgAPE/3dqlvUKzRHMNuTBKKUb6OeR/k= +github.com/joyent/triton-go v0.0.0-20180628001255-830d2b111e62/go.mod h1:U+RSyWxWd04xTqnuOQxnai7XGS2PrPY2cfGoDKtMHjA= +github.com/json-iterator/go v1.1.5 h1:gL2yXlmiIo4+t+y32d4WGwOjKGYcGOuyrg46vadswDE= +github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/jtolds/gls v4.2.1+incompatible h1:fSuqC+Gmlu6l/ZYAoZzx2pyucC8Xza35fpRVWLVmUEE= +github.com/jtolds/gls v4.2.1+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/mitchellh/go-homedir v1.0.0 h1:vKb8ShqSby24Yrqr/yDYkuFz8d0WUjys40rvnGC8aR0= +github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2 h1:BQ1HW7hr4IVovMwWg0E0PYcyW8CzqDcVmaew9cujU4s= +github.com/nicolai86/scaleway-sdk v1.10.2-0.20180628010248-798f60e20bb2/go.mod h1:TLb2Sg7HQcgGdloNxkrmtgDNR9uVYF3lfdFIN4Ro6Sk= +github.com/onsi/ginkgo v1.6.0 h1:Ix8l273rp3QzYgXSR+c8d1fTG7UPgYkOSELPhiY/YGw= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.1 h1:PZSj/UFNaVp3KxrzHOcS7oyuWA7LoOY/77yCTEFu21U= +github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c h1:vwpFWvAO8DeIZfFeqASzZfsxuWPno9ncAebBEP0N3uE= +github.com/packethost/packngo v0.1.1-0.20180711074735-b9cb5096f54c/go.mod h1:otzZQXgoO96RTzDB/Hycg0qZcXZsWJGJRSXbmEIJ+4M= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= +github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pkg/errors v0.8.0 
h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03 h1:Wdi9nwnhFNAlseAOekn6B5G/+GMtks9UKbvRU/CMM/o= +github.com/renier/xmlrpc v0.0.0-20170708154548-ce4a1a486c03/go.mod h1:gRAiPF5C5Nd0eyyRdqIu9qTiFSoZzpTq727b5B8fkkU= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I= +github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= +github.com/sirupsen/logrus v1.0.6 h1:hcP1GmhGigz/O7h1WVUM5KklBp1JoNS9FggWKdj/j3s= +github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= +github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf h1:6V1qxN6Usn4jy8unvggSJz/NC790tefw8Zdy6OZS5co= +github.com/smartystreets/assertions v0.0.0-20180820201707-7c9eb446e3cf/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a h1:JSvGDIbmil4Ui/dDdFBExb7/cmkNjyX5F97oglmvCDo= +github.com/smartystreets/goconvey v0.0.0-20180222194500-ef6db91d284a/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d h1:bVQRCxQvfjNUeRqaY/uT0tFuvuFY0ulgnczuR684Xic= +github.com/softlayer/softlayer-go v0.0.0-20180806151055-260589d94c7d/go.mod h1:Cw4GTlQccdRGSEf6KiMju767x0NEHE0YIVPJSaXjlsw= +github.com/spf13/pflag v1.0.2 h1:Fy0orTDgHdbnzHcsOgfCN4LtHf0ec3wwtiwJqwvf3Gc= +github.com/spf13/pflag v1.0.2/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9 h1:/Bsw4C+DEdqPjt8vAqaC9LAqpAQnaCQQqmolqq3S1T4= +github.com/tent/http-link-go v0.0.0-20130702225549-ac974c61c2f9/go.mod h1:RHkNRtSLfOK7qBTHaeSX1D6BNpI3qw7NTxsmNr4RvN8= +github.com/vmware/govmomi v0.18.0 h1:f7QxSmP7meCtoAmiKZogvVbLInT+CZx6Px6K5rYsJZo= +github.com/vmware/govmomi v0.18.0/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/vmware/vic v1.4.1 h1:M8hwS40QBjVTWU46qSM+tCAUSMHTTyduCmtj9OPR49A= +github.com/vmware/vic v1.4.1/go.mod h1:AiTDrZuV13NkqRzseA5ZmF2QqLpTydaaGN75xgV6Ork= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac h1:7d7lG9fHOLdL6jZPtnV4LpI41SbohIJ1Atq7U991dMg= +golang.org/x/crypto v0.0.0-20180820150726-614d502a4dac/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d h1:g9qWBGx4puODJTMVyoPrpoxPFgVGd+z1DZwjfRu4d0I= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95 h1:RS+wSrhdVci7CsPwJaMN8exaP3UTuQU0qB34R/E/JD0= +golang.org/x/oauth2 v0.0.0-20170807180024-9a379c6b3e95/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654 h1:ogYMTsPtkSw1JROKERM/ualwPUvE2UOP2KfhwFO6aVE= +golang.org/x/sync v0.0.0-20170927054112-8e0aa688b654/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180828065106-d99a578cf41b h1:cmOZLU2i7CLArKNViO+ZCQ47wqYFyKEIpbGWp+b6Uoc= +golang.org/x/sys 
v0.0.0-20180828065106-d99a578cf41b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2 h1:+DCIGbF/swA92ohVg0//6X2IVY3KZs6p9mix0ziNYJM= +golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +google.golang.org/api v0.0.0-20180829000535-087779f1d2c9 h1:z1TeLUmxf9ws9KLICfmX+KGXTs+rjm+aGWzfsv7MZ9w= +google.golang.org/api v0.0.0-20180829000535-087779f1d2c9/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/appengine v1.1.0 h1:igQkv0AAhEIvTEpD5LIpAfav2eeVO9HBTjvKHVJPRSs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +gopkg.in/airbrake/gobrake.v2 v2.0.9 h1:7z2uVWwn7oVeeugY1DtlPAy5H+KYgB1KeKTnqjNatLo= +gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 h1:OAj3g0cR6Dx/R07QgQe8wkA9RNjB2u4i700xBkIT4e0= +gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= +gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +k8s.io/api v0.0.0-20180806132203-61b11ee65332 h1:+ED/2NBbOoeWB9QrGTHxZI7UnE7rnHPKKumOl0WXphs= +k8s.io/api v0.0.0-20180806132203-61b11ee65332/go.mod h1:iuAfoD4hCxJ8Onx9kaTIt30j7jUFS00AXQi6QMi99vA= +k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f h1:V0PkbgaYp5JqCmzLyRmssDtzim0NShXM8gYi4fcX230= +k8s.io/apimachinery v0.0.0-20180821005732-488889b0007f/go.mod h1:ccL7Eh7zubPUSh9A3USN90/OzHNSVN6zxzde07TDCL0= +k8s.io/client-go v8.0.0+incompatible h1:7Zl+OVXn0bobcsi4NEZGdoQDTE9ij1zPMfM21+yqQsM= +k8s.io/client-go v8.0.0+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s= diff --git a/vendor/github.com/hashicorp/go-discover/provider/k8s/k8s_discover.go b/vendor/github.com/hashicorp/go-discover/provider/k8s/k8s_discover.go new file mode 100644 index 000000000000..28bc8fed5384 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/k8s/k8s_discover.go @@ -0,0 +1,209 @@ +// Package k8s provides pod discovery for Kubernetes. +package k8s + +import ( + "fmt" + "log" + "path/filepath" + "strconv" + + "github.com/hashicorp/go-multierror" + "github.com/mitchellh/go-homedir" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + + // Register all known auth mechanisms since we might be authenticating + // from anywhere. 
+	_ "k8s.io/client-go/plugin/pkg/client/auth"
+)
+
+const (
+	// AnnotationKeyPort is the annotation name of the field that specifies
+	// the port name or number to append to the address.
+	AnnotationKeyPort = "consul.hashicorp.com/auto-join-port"
+)
+
+type Provider struct{}
+
+func (p *Provider) Help() string {
+	return `Kubernetes (K8S):
+
+    provider:       "k8s"
+    kubeconfig:     Path to the kubeconfig file.
+    namespace:      Namespace to search for pods (defaults to "default").
+    label_selector: Label selector value to filter pods.
+    field_selector: Field selector value to filter pods.
+    host_network:   "true" if pod host IP and ports should be used.
+
+    The kubeconfig file value will be searched in the following locations:
+
+     1. Use path from "kubeconfig" option if provided.
+     2. Use path from KUBECONFIG environment variable.
+     3. Use default path of $HOME/.kube/config
+
+    By default, the Pod IP is used to join. The "host_network" option may
+    be set to use the Host IP. No port is used by default. Pods may set
+    an annotation 'consul.hashicorp.com/auto-join-port' to a named port or
+    an integer value. If the value matches a named port, that port will
+    be used to join.
+
+    Note that if "host_network" is set to true, then only pods that have
+    a HostIP available will be selected. If a port annotation exists, then
+    the port must be exposed via a HostPort as well, otherwise the pod will
+    be ignored.
+`
+}
+
+func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error) {
+	if args["provider"] != "k8s" {
+		return nil, fmt.Errorf("discover-k8s: invalid provider " + args["provider"])
+	}
+
+	// Get the configuration. This can come from multiple sources. We first
+	// try kubeconfig if it is set directly, then we fall back to in-cluster
+	// auth. Finally, we try the default kubeconfig path.
+	kubeconfig := args["kubeconfig"]
+	if kubeconfig == "" {
+		// If kubeconfig is empty, let's first try the default directory.
+		// This is much faster than trying in-cluster auth so we try this
+		// first.
+		dir, err := homedir.Dir()
+		if err != nil {
+			return nil, fmt.Errorf("discover-k8s: error retrieving home directory: %s", err)
+		}
+		kubeconfig = filepath.Join(dir, ".kube", "config")
+	}
+
+	// First try to get the configuration from the kubeconfig value
+	config, configErr := clientcmd.BuildConfigFromFlags("", kubeconfig)
+	if configErr != nil {
+		configErr = fmt.Errorf("discover-k8s: error loading kubeconfig: %s", configErr)
+
+		// kubeconfig failed, fall back and try in-cluster config. We do
+		// this as the fallback since this makes network connections and
+		// is much slower to fail.
+		var err error
+		config, err = rest.InClusterConfig()
+		if err != nil {
+			return nil, multierror.Append(configErr, fmt.Errorf(
+				"discover-k8s: error loading in-cluster config: %s", err))
+		}
+	}
+
+	// Initialize the clientset
+	clientset, err := kubernetes.NewForConfig(config)
+	if err != nil {
+		return nil, fmt.Errorf("discover-k8s: error initializing k8s client: %s", err)
+	}
+
+	namespace := args["namespace"]
+	if namespace == "" {
+		namespace = "default"
+	}
+
+	// List all the pods based on the filters we requested
+	pods, err := clientset.CoreV1().Pods(namespace).List(metav1.ListOptions{
+		LabelSelector: args["label_selector"],
+		FieldSelector: args["field_selector"],
+	})
+	if err != nil {
+		return nil, fmt.Errorf("discover-k8s: error listing pods: %s", err)
+	}
+
+	return PodAddrs(pods, args, l)
+}
+
+// PodAddrs extracts the addresses from a list of pods.
+//
+// This is a separate method so that we can unit test this without having
+// to set up complicated K8S cluster scenarios. It shouldn't generally be
+// called externally.
+func PodAddrs(pods *corev1.PodList, args map[string]string, l *log.Logger) ([]string, error) {
+	hostNetwork := false
+	if v := args["host_network"]; v != "" {
+		var err error
+		hostNetwork, err = strconv.ParseBool(v)
+		if err != nil {
+			return nil, fmt.Errorf("discover-k8s: host_network must be boolean value: %s", err)
+		}
+	}
+
+	var addrs []string
+PodLoop:
+	for _, pod := range pods.Items {
+		if pod.Status.Phase != corev1.PodRunning {
+			l.Printf("[DEBUG] discover-k8s: ignoring pod %q, not running: %q",
+				pod.Name, pod.Status.Phase)
+			continue
+		}
+
+		// If there is a Ready condition available, we need that to be true.
+		// If no ready condition is set, then we accept this pod regardless.
+		for _, condition := range pod.Status.Conditions {
+			if condition.Type == corev1.PodReady && condition.Status != corev1.ConditionTrue {
+				l.Printf("[DEBUG] discover-k8s: ignoring pod %q, not ready state", pod.Name)
+				continue PodLoop
+			}
+		}
+
+		// Get the IP address that we will join.
+		addr := pod.Status.PodIP
+		if hostNetwork {
+			addr = pod.Status.HostIP
+		}
+		if addr == "" {
+			// This can be empty according to the API docs, so we guard against that.
+			l.Printf("[DEBUG] discover-k8s: ignoring pod %q, requested IP is empty", pod.Name)
+			continue
+		}
+
+		// We only use the port if it is specified as an annotation. The
+		// annotation value can be a name or a number.
+		if v := pod.Annotations[AnnotationKeyPort]; v != "" {
+			port, err := podPort(&pod, v, hostNetwork)
+			if err != nil {
+				l.Printf("[DEBUG] discover-k8s: ignoring pod %q, error retrieving port: %s",
+					pod.Name, err)
+				continue
+			}
+
+			addr = fmt.Sprintf("%s:%d", addr, port)
+		}
+
+		addrs = append(addrs, addr)
+	}
+
+	return addrs, nil
+}
+
+// podPort extracts the proper port for the address from the given pod
+// for a non-empty annotation.
+//
+// Pre-condition: annotation is non-empty
+func podPort(pod *corev1.Pod, annotation string, host bool) (int32, error) {
+	// First look for a named port matching the value of the annotation.
+	for _, container := range pod.Spec.Containers {
+		for _, portDef := range container.Ports {
+			if portDef.Name == annotation {
+				if host {
+					// It is possible for HostPort to be zero; if that is the
+					// case then we ignore this port.
+					if portDef.HostPort == 0 {
+						continue
+					}
+
+					return portDef.HostPort, nil
+				}
+
+				return portDef.ContainerPort, nil
+			}
+		}
+	}
+
+	// Otherwise assume that the port is a numeric value.
+	v, err := strconv.ParseInt(annotation, 0, 32)
+	return int32(v), err
+}
diff --git a/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go b/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go
index 6db860ff5c53..278003579145 100644
--- a/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go
+++ b/vendor/github.com/hashicorp/go-discover/provider/os/os_discover.go
@@ -128,16 +128,16 @@ func newClient(args map[string]string, l *log.Logger) (*gophercloud.ServiceClien
 	}
 	projectID := argsOrEnv(args, "project_id", "OS_PROJECT_ID")
 	insecure := argsOrEnv(args, "insecure", "OS_INSECURE")
+	domain_id := argsOrEnv(args, "domain_id", "OS_DOMAIN_ID")
+	domain_name := argsOrEnv(args, "domain_name", "OS_DOMAIN_NAME")
 
 	if url == "" {
 		return nil, fmt.Errorf("discover-os: Auth url must be provided")
 	}
 
 	ao := gophercloud.AuthOptions{
-		// "domain_id": OS_DOMAIN_ID
-		DomainID: "",
-		// "domain_name": OS_DOMAIN_NAME
-		DomainName: "",
+		DomainID:         domain_id,
+		DomainName:       domain_name,
 		IdentityEndpoint: url,
 		Username:         username,
 		Password:         password,
diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go
index 11bd2dab8732..043cb2fe9083 100644
--- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go
+++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/scaleway_discover.go
@@ -6,7 +6,7 @@ import (
 	"io/ioutil"
 	"log"
 
-	"github.com/nicolai86/scaleway-sdk/api"
+	api "github.com/nicolai86/scaleway-sdk"
 )
 
 type Provider struct{}
@@ -58,7 +58,7 @@ func (p *Provider) Addrs(args map[string]string, l *log.Logger) ([]string, error
 	// Filter servers by tag
 	var addrs []string
 	if servers != nil {
-		for _, server := range *servers {
+		for _, server := range servers {
 			if stringInSlice(tagName, server.Tags) {
 				l.Printf("[DEBUG] discover-scaleway: Found server (%s) - %s with private IP: %s",
 					server.Name, server.Hostname, server.PrivateIP)
diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md
index 7503a16ca20a..12e056f64e07 100644
--- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md
+++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/LICENSE.md
@@ -1,8 +1,6 @@
 The MIT License
 ===============
 
-Copyright (c) **2014-2016 Scaleway ([@scaleway](https://twitter.com/scaleway))**
-
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
 in the Software without restriction, including without limitation the rights
diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/README.md b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/README.md
new file mode 100644
index 000000000000..b0c8dc1fe19a
--- /dev/null
+++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/README.md
@@ -0,0 +1,9 @@
+# Scaleway SDK
+
+A fork of the scaleway cli repository which only aims at being an API SDK - nothing more.
+
+## Tests
+
+```bash
+$ go test ./...
+``` \ No newline at end of file diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/api.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api.go similarity index 70% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/api.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api.go index 82ddd25ad9a7..fad62e78c3ed 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/api.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api.go @@ -1,10 +1,10 @@ -// Copyright (C) 2015 Scaleway. All rights reserved. +// Copyright (C) 2015 . All rights reserved. // Use of this source code is governed by a MIT-style // license that can be found in the LICENSE.md file. -// Interact with Scaleway API +// Interact with API -// Package api contains client and functions to interact with Scaleway API +// Package api contains client and functions to interact with API package api import ( @@ -23,17 +23,15 @@ import ( "golang.org/x/sync/errgroup" ) -// https://cp-par1.scaleway.com/products/servers -// https://cp-ams1.scaleway.com/products/servers +// https://cp-par1..com/products/servers +// https://cp-ams1..com/products/servers // Default values var ( - AccountAPI = "https://account.scaleway.com/" - MetadataAPI = "http://169.254.42.42/" - MarketplaceAPI = "https://api-marketplace.scaleway.com" - ComputeAPIPar1 = "https://cp-par1.scaleway.com/" - ComputeAPIAms1 = "https://cp-ams1.scaleway.com/" - AvailabilityAPIPar1 = "https://availability.scaleway.com/" - AvailabilityAPIAms1 = "https://availability-ams1.scaleway.com/" + AccountAPI = "https://account.scaleway.com/" + MetadataAPI = "http://169.254.42.42/" + MarketplaceAPI = "https://api-marketplace.scaleway.com" + ComputeAPIPar1 = "https://cp-par1.scaleway.com/" + ComputeAPIAms1 = "https://cp-ams1.scaleway.com/" URLPublicDNS = ".pub.cloud.scaleway.com" URLPrivateDNS = ".priv.cloud.scaleway.com" @@ -60,28 +58,21 @@ type HTTPClient interface { Do(*http.Request) (*http.Response, error) } -// ScalewayAPI is the interface used to communicate with the Scaleway API -type ScalewayAPI struct { - // Organization is the identifier of the Scaleway organization - Organization string +// API is the interface used to communicate with the API +type API struct { + Organization string // Organization is the identifier of the organization + Token string // Token is the authentication token for the organization + Client HTTPClient // Client is used for all HTTP interactions - // Token is the authentication token for the Scaleway organization - Token string - - // Password is the authentication password - password string - - userAgent string - - client HTTPClient - computeAPI string - availabilityAPI string + password string // Password is the authentication password + userAgent string + computeAPI string Region string } -// ScalewayAPIError represents a Scaleway API Error -type ScalewayAPIError struct { +// APIError represents a API Error +type APIError struct { // Message is a human-friendly error message APIMessage string `json:"message,omitempty"` @@ -99,7 +90,7 @@ type ScalewayAPIError struct { } // Error returns a string representing the error -func (e ScalewayAPIError) Error() string { +func (e APIError) Error() string { var b 
bytes.Buffer fmt.Fprintf(&b, "StatusCode: %v, ", e.StatusCode) @@ -111,17 +102,17 @@ func (e ScalewayAPIError) Error() string { return b.String() } -// New creates a ready-to-use Scaleway SDK client -func New(organization, token, region string, options ...func(*ScalewayAPI)) (*ScalewayAPI, error) { - s := &ScalewayAPI{ +// New creates a ready-to-use SDK client +func New(organization, token, region string, options ...func(*API)) (*API, error) { + s := &API{ // exposed Organization: organization, Token: token, + Client: &http.Client{}, // internal - client: &http.Client{}, password: "", - userAgent: "scaleway-sdk", + userAgent: "-sdk", } for _, option := range options { option(s) @@ -129,10 +120,8 @@ func New(organization, token, region string, options ...func(*ScalewayAPI)) (*Sc switch region { case "par1", "": s.computeAPI = ComputeAPIPar1 - s.availabilityAPI = AvailabilityAPIPar1 case "ams1": s.computeAPI = ComputeAPIAms1 - s.availabilityAPI = AvailabilityAPIAms1 default: return nil, fmt.Errorf("%s isn't a valid region", region) } @@ -140,13 +129,10 @@ func New(organization, token, region string, options ...func(*ScalewayAPI)) (*Sc if url := os.Getenv("SCW_COMPUTE_API"); url != "" { s.computeAPI = url } - if url := os.Getenv("SCW_AVAILABILITY_API"); url != "" { - s.availabilityAPI = url - } return s, nil } -func (s *ScalewayAPI) response(method, uri string, content io.Reader) (resp *http.Response, err error) { +func (s *API) response(method, uri string, content io.Reader) (resp *http.Response, err error) { var ( req *http.Request ) @@ -159,12 +145,12 @@ func (s *ScalewayAPI) response(method, uri string, content io.Reader) (resp *htt req.Header.Set("X-Auth-Token", s.Token) req.Header.Set("Content-Type", "application/json") req.Header.Set("User-Agent", s.userAgent) - resp, err = s.client.Do(req) + resp, err = s.Client.Do(req) return } // GetResponsePaginate fetchs all resources and returns an http.Response object for the requested resource -func (s *ScalewayAPI) GetResponsePaginate(apiURL, resource string, values url.Values) (*http.Response, error) { +func (s *API) GetResponsePaginate(apiURL, resource string, values url.Values) (*http.Response, error) { resp, err := s.response("HEAD", fmt.Sprintf("%s/%s?%s", strings.TrimRight(apiURL, "/"), resource, values.Encode()), nil) if err != nil { return nil, err @@ -252,7 +238,7 @@ func (s *ScalewayAPI) GetResponsePaginate(apiURL, resource string, values url.Va } // PostResponse returns an http.Response object for the updated resource -func (s *ScalewayAPI) PostResponse(apiURL, resource string, data interface{}) (*http.Response, error) { +func (s *API) PostResponse(apiURL, resource string, data interface{}) (*http.Response, error) { payload := new(bytes.Buffer) if err := json.NewEncoder(payload).Encode(data); err != nil { return nil, err @@ -261,7 +247,7 @@ func (s *ScalewayAPI) PostResponse(apiURL, resource string, data interface{}) (* } // PatchResponse returns an http.Response object for the updated resource -func (s *ScalewayAPI) PatchResponse(apiURL, resource string, data interface{}) (*http.Response, error) { +func (s *API) PatchResponse(apiURL, resource string, data interface{}) (*http.Response, error) { payload := new(bytes.Buffer) if err := json.NewEncoder(payload).Encode(data); err != nil { return nil, err @@ -270,7 +256,7 @@ func (s *ScalewayAPI) PatchResponse(apiURL, resource string, data interface{}) ( } // PutResponse returns an http.Response object for the updated resource -func (s *ScalewayAPI) PutResponse(apiURL, resource string, 
data interface{}) (*http.Response, error) { +func (s *API) PutResponse(apiURL, resource string, data interface{}) (*http.Response, error) { payload := new(bytes.Buffer) if err := json.NewEncoder(payload).Encode(data); err != nil { return nil, err @@ -279,12 +265,12 @@ func (s *ScalewayAPI) PutResponse(apiURL, resource string, data interface{}) (*h } // DeleteResponse returns an http.Response object for the deleted resource -func (s *ScalewayAPI) DeleteResponse(apiURL, resource string) (*http.Response, error) { +func (s *API) DeleteResponse(apiURL, resource string) (*http.Response, error) { return s.response("DELETE", fmt.Sprintf("%s/%s", strings.TrimRight(apiURL, "/"), resource), nil) } // handleHTTPError checks the statusCode and displays the error -func (s *ScalewayAPI) handleHTTPError(goodStatusCode []int, resp *http.Response) ([]byte, error) { +func (s *API) handleHTTPError(goodStatusCode []int, resp *http.Response) ([]byte, error) { body, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, err @@ -293,25 +279,24 @@ func (s *ScalewayAPI) handleHTTPError(goodStatusCode []int, resp *http.Response) if resp.StatusCode >= http.StatusInternalServerError { return nil, errors.New(string(body)) } - good := false + for _, code := range goodStatusCode { if code == resp.StatusCode { - good = true + return body, nil } } - if !good { - var scwError ScalewayAPIError + var scwError APIError + scwError.StatusCode = resp.StatusCode + if len(body) > 0 { if err := json.Unmarshal(body, &scwError); err != nil { return nil, err } - scwError.StatusCode = resp.StatusCode - return nil, scwError } - return body, nil + return nil, scwError } // SetPassword register the password -func (s *ScalewayAPI) SetPassword(password string) { +func (s *API) SetPassword(password string) { s.password = password } diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/availability.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/availability.go deleted file mode 100644 index fe5e23ac14bf..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/availability.go +++ /dev/null @@ -1,37 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "io/ioutil" -) - -type ServerAvailabilities map[string]interface{} - -func (a ServerAvailabilities) CommercialTypes() []string { - types := []string{} - for k, v := range a { - if _, ok := v.(bool); !ok { - continue - } - types = append(types, k) - } - return types -} - -func (s *ScalewayAPI) GetServerAvailabilities() (ServerAvailabilities, error) { - resp, err := s.response("GET", fmt.Sprintf("%s/availability.json", s.availabilityAPI), nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - bs, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - content := ServerAvailabilities{} - if err := json.Unmarshal(bs, &content); err != nil { - return nil, err - } - return content, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/container.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/container.go deleted file mode 100644 index d1173b20fc0d..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/container.go +++ /dev/null @@ -1,72 +0,0 @@ -package api - -import ( - 
"encoding/json" - "fmt" - "net/http" - "net/url" -) - -// ScalewayContainerData represents a Scaleway container data (S3) -type ScalewayContainerData struct { - LastModified string `json:"last_modified"` - Name string `json:"name"` - Size string `json:"size"` -} - -// ScalewayGetContainerDatas represents a list of Scaleway containers data (S3) -type ScalewayGetContainerDatas struct { - Container []ScalewayContainerData `json:"container"` -} - -// ScalewayContainer represents a Scaleway container (S3) -type ScalewayContainer struct { - ScalewayOrganizationDefinition `json:"organization"` - Name string `json:"name"` - Size string `json:"size"` -} - -// ScalewayGetContainers represents a list of Scaleway containers (S3) -type ScalewayGetContainers struct { - Containers []ScalewayContainer `json:"containers"` -} - -// GetContainers returns a ScalewayGetContainers -func (s *ScalewayAPI) GetContainers() (*ScalewayGetContainers, error) { - resp, err := s.GetResponsePaginate(s.computeAPI, "containers", url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var containers ScalewayGetContainers - - if err = json.Unmarshal(body, &containers); err != nil { - return nil, err - } - return &containers, nil -} - -// GetContainerDatas returns a ScalewayGetContainerDatas -func (s *ScalewayAPI) GetContainerDatas(container string) (*ScalewayGetContainerDatas, error) { - resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("containers/%s", container), url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var datas ScalewayGetContainerDatas - - if err = json.Unmarshal(body, &datas); err != nil { - return nil, err - } - return &datas, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/organization.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/organization.go deleted file mode 100644 index 29e4b436fb8e..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/organization.go +++ /dev/null @@ -1,39 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" - "net/url" -) - -// ScalewayOrganizationDefinition represents a Scaleway Organization -type ScalewayOrganizationDefinition struct { - ID string `json:"id"` - Name string `json:"name"` - Users []ScalewayUserDefinition `json:"users"` -} - -// ScalewayOrganizationsDefinition represents a Scaleway Organizations -type ScalewayOrganizationsDefinition struct { - Organizations []ScalewayOrganizationDefinition `json:"organizations"` -} - -// GetOrganization returns Organization -func (s *ScalewayAPI) GetOrganization() (*ScalewayOrganizationsDefinition, error) { - resp, err := s.GetResponsePaginate(AccountAPI, "organizations", url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var data ScalewayOrganizationsDefinition - - if err = json.Unmarshal(body, &data); err != nil { - return nil, err - } - return &data, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/permissions.go 
b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/permissions.go deleted file mode 100644 index c3cdaaa47607..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/permissions.go +++ /dev/null @@ -1,39 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" -) - -// ScalewayPermissions represents the response of GET /permissions -type ScalewayPermissions map[string]ScalewayPermCategory - -// ScalewayPermCategory represents ScalewayPermissions's fields -type ScalewayPermCategory map[string][]string - -// ScalewayPermissionDefinition represents the permissions -type ScalewayPermissionDefinition struct { - Permissions ScalewayPermissions `json:"permissions"` -} - -// GetPermissions returns the permissions -func (s *ScalewayAPI) GetPermissions() (*ScalewayPermissionDefinition, error) { - resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s/permissions", s.Token), url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var permissions ScalewayPermissionDefinition - - if err = json.Unmarshal(body, &permissions); err != nil { - return nil, err - } - return &permissions, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/security_group.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/security_group.go deleted file mode 100644 index 26b5cef4e6ce..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/security_group.go +++ /dev/null @@ -1,129 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" -) - -// ScalewaySecurityGroups definition -type ScalewaySecurityGroups struct { - Description string `json:"description"` - ID string `json:"id"` - Organization string `json:"organization"` - Name string `json:"name"` - Servers []ScalewaySecurityGroup `json:"servers"` - EnableDefaultSecurity bool `json:"enable_default_security"` - OrganizationDefault bool `json:"organization_default"` -} - -// ScalewayGetSecurityGroups represents the response of a GET /security_groups/ -type ScalewayGetSecurityGroups struct { - SecurityGroups []ScalewaySecurityGroups `json:"security_groups"` -} - -// ScalewayGetSecurityGroup represents the response of a GET /security_groups/{groupID} -type ScalewayGetSecurityGroup struct { - SecurityGroups ScalewaySecurityGroups `json:"security_group"` -} - -// ScalewaySecurityGroup represents a Scaleway security group -type ScalewaySecurityGroup struct { - // Identifier is a unique identifier for the security group - Identifier string `json:"id,omitempty"` - - // Name is the user-defined name of the security group - Name string `json:"name,omitempty"` -} - -// ScalewayNewSecurityGroup definition POST request /security_groups -type ScalewayNewSecurityGroup struct { - Organization string `json:"organization"` - Name string `json:"name"` - Description string `json:"description"` -} - -// ScalewayUpdateSecurityGroup definition PUT request /security_groups -type ScalewayUpdateSecurityGroup struct { - Organization string `json:"organization"` - Name string `json:"name"` - Description string `json:"description"` - OrganizationDefault bool `json:"organization_default"` -} - -// 
DeleteSecurityGroup deletes a SecurityGroup -func (s *ScalewayAPI) DeleteSecurityGroup(securityGroupID string) error { - resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID)) - if err != nil { - return err - } - defer resp.Body.Close() - - _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) - return err -} - -// PutSecurityGroup updates a SecurityGroup -func (s *ScalewayAPI) PutSecurityGroup(group ScalewayUpdateSecurityGroup, securityGroupID string) error { - resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group) - if err != nil { - return err - } - defer resp.Body.Close() - - _, err = s.handleHTTPError([]int{http.StatusOK}, resp) - return err -} - -// GetASecurityGroup returns a ScalewaySecurityGroup -func (s *ScalewayAPI) GetASecurityGroup(groupsID string) (*ScalewayGetSecurityGroup, error) { - resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s", groupsID), url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var securityGroups ScalewayGetSecurityGroup - - if err = json.Unmarshal(body, &securityGroups); err != nil { - return nil, err - } - return &securityGroups, nil -} - -// PostSecurityGroup posts a group on a server -func (s *ScalewayAPI) PostSecurityGroup(group ScalewayNewSecurityGroup) error { - resp, err := s.PostResponse(s.computeAPI, "security_groups", group) - if err != nil { - return err - } - defer resp.Body.Close() - - _, err = s.handleHTTPError([]int{http.StatusCreated}, resp) - return err -} - -// GetSecurityGroups returns a ScalewaySecurityGroups -func (s *ScalewayAPI) GetSecurityGroups() (*ScalewayGetSecurityGroups, error) { - resp, err := s.GetResponsePaginate(s.computeAPI, "security_groups", url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var securityGroups ScalewayGetSecurityGroups - - if err = json.Unmarshal(body, &securityGroups); err != nil { - return nil, err - } - return &securityGroups, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/security_group_rule.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/security_group_rule.go deleted file mode 100644 index 0735c8d380ac..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/security_group_rule.go +++ /dev/null @@ -1,116 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" -) - -// ScalewaySecurityGroupRule definition -type ScalewaySecurityGroupRule struct { - Direction string `json:"direction"` - Protocol string `json:"protocol"` - IPRange string `json:"ip_range"` - DestPortFrom int `json:"dest_port_from,omitempty"` - Action string `json:"action"` - Position int `json:"position"` - DestPortTo string `json:"dest_port_to"` - Editable bool `json:"editable"` - ID string `json:"id"` -} - -// ScalewayGetSecurityGroupRules represents the response of a GET /security_group/{groupID}/rules -type ScalewayGetSecurityGroupRules struct { - Rules []ScalewaySecurityGroupRule `json:"rules"` -} - -// ScalewayGetSecurityGroupRule represents the response of a GET /security_group/{groupID}/rules/{ruleID} -type 
ScalewayGetSecurityGroupRule struct { - Rules ScalewaySecurityGroupRule `json:"rule"` -} - -// ScalewayNewSecurityGroupRule definition POST/PUT request /security_group/{groupID} -type ScalewayNewSecurityGroupRule struct { - Action string `json:"action"` - Direction string `json:"direction"` - IPRange string `json:"ip_range"` - Protocol string `json:"protocol"` - DestPortFrom int `json:"dest_port_from,omitempty"` -} - -// GetSecurityGroupRules returns a ScalewaySecurityGroupRules -func (s *ScalewayAPI) GetSecurityGroupRules(groupID string) (*ScalewayGetSecurityGroupRules, error) { - resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", groupID), url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var securityGroupRules ScalewayGetSecurityGroupRules - - if err = json.Unmarshal(body, &securityGroupRules); err != nil { - return nil, err - } - return &securityGroupRules, nil -} - -// GetASecurityGroupRule returns a ScalewaySecurityGroupRule -func (s *ScalewayAPI) GetASecurityGroupRule(groupID string, rulesID string) (*ScalewayGetSecurityGroupRule, error) { - resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", groupID, rulesID), url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var securityGroupRules ScalewayGetSecurityGroupRule - - if err = json.Unmarshal(body, &securityGroupRules); err != nil { - return nil, err - } - return &securityGroupRules, nil -} - -// PostSecurityGroupRule posts a rule on a server -func (s *ScalewayAPI) PostSecurityGroupRule(SecurityGroupID string, rules ScalewayNewSecurityGroupRule) error { - resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", SecurityGroupID), rules) - if err != nil { - return err - } - defer resp.Body.Close() - - _, err = s.handleHTTPError([]int{http.StatusCreated}, resp) - return err -} - -// PutSecurityGroupRule updates a SecurityGroupRule -func (s *ScalewayAPI) PutSecurityGroupRule(rules ScalewayNewSecurityGroupRule, securityGroupID, RuleID string) error { - resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", securityGroupID, RuleID), rules) - if err != nil { - return err - } - defer resp.Body.Close() - - _, err = s.handleHTTPError([]int{http.StatusOK}, resp) - return err -} - -// DeleteSecurityGroupRule deletes a SecurityGroupRule -func (s *ScalewayAPI) DeleteSecurityGroupRule(SecurityGroupID, RuleID string) error { - resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", SecurityGroupID, RuleID)) - if err != nil { - return err - } - defer resp.Body.Close() - - _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) - return err -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/tasks.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/tasks.go deleted file mode 100644 index 3cadbad94d94..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/tasks.go +++ /dev/null @@ -1,59 +0,0 @@ -package api - -import ( - "encoding/json" - "net/http" - "net/url" -) - -// ScalewayTask represents a Scaleway Task -type ScalewayTask struct { 
- // Identifier is a unique identifier for the task - Identifier string `json:"id,omitempty"` - - // StartDate is the start date of the task - StartDate string `json:"started_at,omitempty"` - - // TerminationDate is the termination date of the task - TerminationDate string `json:"terminated_at,omitempty"` - - HrefFrom string `json:"href_from,omitempty"` - - Description string `json:"description,omitempty"` - - Status string `json:"status,omitempty"` - - Progress int `json:"progress,omitempty"` -} - -// ScalewayOneTask represents the response of a GET /tasks/UUID API call -type ScalewayOneTask struct { - Task ScalewayTask `json:"task,omitempty"` -} - -// ScalewayTasks represents a group of Scaleway tasks -type ScalewayTasks struct { - // Tasks holds scaleway tasks of the response - Tasks []ScalewayTask `json:"tasks,omitempty"` -} - -// GetTasks get the list of tasks from the ScalewayAPI -func (s *ScalewayAPI) GetTasks() (*[]ScalewayTask, error) { - query := url.Values{} - resp, err := s.GetResponsePaginate(s.computeAPI, "tasks", query) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var tasks ScalewayTasks - - if err = json.Unmarshal(body, &tasks); err != nil { - return nil, err - } - return &tasks.Tasks, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/user.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/user.go deleted file mode 100644 index 3a4371f6bad6..000000000000 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/user.go +++ /dev/null @@ -1,121 +0,0 @@ -package api - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" -) - -// ScalewayTokenDefinition represents a Scaleway Token -type ScalewayTokenDefinition struct { - UserID string `json:"user_id"` - Description string `json:"description,omitempty"` - Roles ScalewayRoleDefinition `json:"roles"` - Expires string `json:"expires"` - InheritsUsersPerms bool `json:"inherits_user_perms"` - ID string `json:"id"` -} - -// ScalewayTokensDefinition represents a Scaleway Tokens -type ScalewayTokensDefinition struct { - Token ScalewayTokenDefinition `json:"token"` -} - -// ScalewayGetTokens represents a list of Scaleway Tokens -type ScalewayGetTokens struct { - Tokens []ScalewayTokenDefinition `json:"tokens"` -} - -// ScalewayRoleDefinition represents a Scaleway Token UserId Role -type ScalewayRoleDefinition struct { - Organization ScalewayOrganizationDefinition `json:"organization,omitempty"` - Role string `json:"role,omitempty"` -} - -// ScalewayUserDefinition represents a Scaleway User -type ScalewayUserDefinition struct { - Email string `json:"email"` - Firstname string `json:"firstname"` - Fullname string `json:"fullname"` - ID string `json:"id"` - Lastname string `json:"lastname"` - Organizations []ScalewayOrganizationDefinition `json:"organizations"` - Roles []ScalewayRoleDefinition `json:"roles"` - SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` -} - -// ScalewayKeyDefinition represents a key -type ScalewayKeyDefinition struct { - Key string `json:"key"` - Fingerprint string `json:"fingerprint,omitempty"` -} - -// ScalewayUsersDefinition represents the response of a GET /user -type ScalewayUsersDefinition struct { - User ScalewayUserDefinition `json:"user"` -} - -// ScalewayUserPatchSSHKeyDefinition 
represents a User Patch -type ScalewayUserPatchSSHKeyDefinition struct { - SSHPublicKeys []ScalewayKeyDefinition `json:"ssh_public_keys"` -} - -// PatchUserSSHKey updates a user -func (s *ScalewayAPI) PatchUserSSHKey(UserID string, definition ScalewayUserPatchSSHKeyDefinition) error { - resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("users/%s", UserID), definition) - if err != nil { - return err - } - - defer resp.Body.Close() - - if _, err := s.handleHTTPError([]int{http.StatusOK}, resp); err != nil { - return err - } - return nil -} - -// GetUserID returns the userID -func (s *ScalewayAPI) GetUserID() (string, error) { - resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", s.Token), url.Values{}) - if err != nil { - return "", err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return "", err - } - var token ScalewayTokensDefinition - - if err = json.Unmarshal(body, &token); err != nil { - return "", err - } - return token.Token.UserID, nil -} - -// GetUser returns the user -func (s *ScalewayAPI) GetUser() (*ScalewayUserDefinition, error) { - userID, err := s.GetUserID() - if err != nil { - return nil, err - } - resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("users/%s", userID), url.Values{}) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return nil, err - } - var user ScalewayUsersDefinition - - if err = json.Unmarshal(body, &user); err != nil { - return nil, err - } - return &user.User, nil -} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/availability.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/availability.go new file mode 100644 index 000000000000..58977127d0dc --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/availability.go @@ -0,0 +1,50 @@ +package api + +import ( + "encoding/json" + "fmt" + "io/ioutil" +) + +type InstanceTypeAvailability string + +var ( + InstanceTypeAvailable InstanceTypeAvailability = "available" + InstanceTypeScarce InstanceTypeAvailability = "scarce" + InstanceTypeShortage InstanceTypeAvailability = "shortage" +) + +type ServerAvailability struct { + Availability InstanceTypeAvailability `json:"availability"` +} + +type ServerAvailabilities map[string]ServerAvailability + +func (a ServerAvailabilities) CommercialTypes() []string { + types := []string{} + for k, _ := range a { + types = append(types, k) + } + return types +} + +type availabilityResponse struct { + Servers ServerAvailabilities +} + +func (s *API) GetServerAvailabilities() (ServerAvailabilities, error) { + resp, err := s.response("GET", fmt.Sprintf("%s/products/servers/availability", s.computeAPI), nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + bs, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + content := availabilityResponse{} + if err := json.Unmarshal(bs, &content); err != nil { + return nil, err + } + return content.Servers, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/bootscript.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/bootscript.go similarity index 63% rename from 
vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/bootscript.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/bootscript.go index 67849c6946b0..b10ced6e3ca9 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/bootscript.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/bootscript.go @@ -6,8 +6,8 @@ import ( "net/url" ) -// ScalewayBootscript represents a Scaleway Bootscript -type ScalewayBootscript struct { +// Bootscript represents a Bootscript +type Bootscript struct { Bootcmdargs string `json:"bootcmdargs,omitempty"` Dtb string `json:"dtb,omitempty"` Initrd string `json:"initrd,omitempty"` @@ -31,19 +31,16 @@ type ScalewayBootscript struct { Default bool `json:"default,omitempty"` } -// ScalewayOneBootscript represents the response of a GET /bootscripts/UUID API call -type ScalewayOneBootscript struct { - Bootscript ScalewayBootscript `json:"bootscript,omitempty"` +type getBootscriptResponse struct { + Bootscript Bootscript `json:"bootscript,omitempty"` } -// ScalewayBootscripts represents a group of Scaleway bootscripts -type ScalewayBootscripts struct { - // Bootscripts holds Scaleway bootscripts of the response - Bootscripts []ScalewayBootscript `json:"bootscripts,omitempty"` +type getBootscriptsResponse struct { + Bootscripts []Bootscript `json:"bootscripts,omitempty"` } -// GetBootscripts gets the list of bootscripts from the ScalewayAPI -func (s *ScalewayAPI) GetBootscripts() (*[]ScalewayBootscript, error) { +// GetBootscripts gets the list of bootscripts from the API +func (s *API) GetBootscripts() ([]Bootscript, error) { query := url.Values{} resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts", query) @@ -56,16 +53,16 @@ func (s *ScalewayAPI) GetBootscripts() (*[]ScalewayBootscript, error) { if err != nil { return nil, err } - var bootscripts ScalewayBootscripts + var bootscripts getBootscriptsResponse if err = json.Unmarshal(body, &bootscripts); err != nil { return nil, err } - return &bootscripts.Bootscripts, nil + return bootscripts.Bootscripts, nil } -// GetBootscript gets a bootscript from the ScalewayAPI -func (s *ScalewayAPI) GetBootscript(bootscriptID string) (*ScalewayBootscript, error) { +// GetBootscript gets a bootscript from the API +func (s *API) GetBootscript(bootscriptID string) (*Bootscript, error) { resp, err := s.GetResponsePaginate(s.computeAPI, "bootscripts/"+bootscriptID, url.Values{}) if err != nil { return nil, err @@ -76,7 +73,7 @@ func (s *ScalewayAPI) GetBootscript(bootscriptID string) (*ScalewayBootscript, e if err != nil { return nil, err } - var oneBootscript ScalewayOneBootscript + var oneBootscript getBootscriptResponse if err = json.Unmarshal(body, &oneBootscript); err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/container.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/container.go new file mode 100644 index 000000000000..e7a5abc49e9b --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/container.go @@ -0,0 +1,70 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// ContainerData represents a container data (S3) +type ContainerData struct { + LastModified 
string `json:"last_modified"` + Name string `json:"name"` + Size string `json:"size"` +} + +type getContainerDatas struct { + Container []*ContainerData `json:"container"` +} + +// Container represents a container (S3) +type Container struct { + Organization `json:"organization"` + Name string `json:"name"` + Size string `json:"size"` +} + +type getContainers struct { + Containers []*Container `json:"containers"` +} + +// GetContainers returns a GetContainers +func (s *API) GetContainers() ([]*Container, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "containers", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var containers getContainers + + if err = json.Unmarshal(body, &containers); err != nil { + return nil, err + } + return containers.Containers, nil +} + +// GetContainerDatas returns a GetContainerDatas +func (s *API) GetContainerDatas(container string) ([]*ContainerData, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("containers/%s", container), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var datas getContainerDatas + + if err = json.Unmarshal(body, &datas); err != nil { + return nil, err + } + return datas.Container, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/dashboard.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/dashboard.go similarity index 70% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/dashboard.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/dashboard.go index 6835ab4b4010..a66cea6faa1c 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/dashboard.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/dashboard.go @@ -6,13 +6,13 @@ import ( "net/url" ) -// ScalewayDashboardResp represents a dashboard received from the API -type ScalewayDashboardResp struct { - Dashboard ScalewayDashboard +// DashboardResp represents a dashboard received from the API +type DashboardResp struct { + Dashboard Dashboard } -// ScalewayDashboard represents a dashboard -type ScalewayDashboard struct { +// Dashboard represents a dashboard +type Dashboard struct { VolumesCount int `json:"volumes_count"` RunningServersCount int `json:"running_servers_count"` ImagesCount int `json:"images_count"` @@ -22,7 +22,7 @@ type ScalewayDashboard struct { } // GetDashboard returns the dashboard -func (s *ScalewayAPI) GetDashboard() (*ScalewayDashboard, error) { +func (s *API) GetDashboard() (*Dashboard, error) { resp, err := s.GetResponsePaginate(s.computeAPI, "dashboard", url.Values{}) if err != nil { return nil, err @@ -33,7 +33,7 @@ func (s *ScalewayAPI) GetDashboard() (*ScalewayDashboard, error) { if err != nil { return nil, err } - var dashboard ScalewayDashboardResp + var dashboard DashboardResp if err = json.Unmarshal(body, &dashboard); err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/image.go 
b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/image.go similarity index 78% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/image.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/image.go index 0a2fba1819e8..e8e644e32182 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/image.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/image.go @@ -7,8 +7,8 @@ import ( "net/url" ) -// ScalewayImageDefinition represents a Scaleway image definition -type ScalewayImageDefinition struct { +// ImageDefinition represents a image definition +type ImageDefinition struct { SnapshotIDentifier string `json:"root_volume"` Name string `json:"name,omitempty"` Organization string `json:"organization"` @@ -16,8 +16,8 @@ type ScalewayImageDefinition struct { DefaultBootscript *string `json:"default_bootscript,omitempty"` } -// ScalewayImage represents a Scaleway Image -type ScalewayImage struct { +// Image represents a Image +type Image struct { // Identifier is a unique identifier for the image Identifier string `json:"id,omitempty"` @@ -31,13 +31,13 @@ type ScalewayImage struct { ModificationDate string `json:"modification_date,omitempty"` // RootVolume is the root volume bound to the image - RootVolume ScalewayVolume `json:"root_volume,omitempty"` + RootVolume Volume `json:"root_volume,omitempty"` // Public is true for public images and false for user images Public bool `json:"public,omitempty"` // Bootscript is the bootscript bound to the image - DefaultBootscript *ScalewayBootscript `json:"default_bootscript,omitempty"` + DefaultBootscript *Bootscript `json:"default_bootscript,omitempty"` // Organization is the owner of the image Organization string `json:"organization,omitempty"` @@ -48,23 +48,23 @@ type ScalewayImage struct { // FIXME: extra_volumes } -// ScalewayImageIdentifier represents a Scaleway Image Identifier -type ScalewayImageIdentifier struct { +// ImageIdentifier represents a Image Identifier +type ImageIdentifier struct { Identifier string Arch string Region string Owner string } -// ScalewayOneImage represents the response of a GET /images/UUID API call -type ScalewayOneImage struct { - Image ScalewayImage `json:"image,omitempty"` +// OneImage represents the response of a GET /images/UUID API call +type OneImage struct { + Image Image `json:"image,omitempty"` } -// ScalewayImages represents a group of Scaleway images -type ScalewayImages struct { - // Images holds scaleway images of the response - Images []ScalewayImage `json:"images,omitempty"` +// Images represents a group of images +type Images struct { + // Images holds images of the response + Images []Image `json:"images,omitempty"` } // MarketImages represents MarketPlace images @@ -130,9 +130,9 @@ type MarketImage struct { MarketVersions } -// PostImage creates a new image -func (s *ScalewayAPI) PostImage(volumeID string, name string, bootscript string, arch string) (string, error) { - definition := ScalewayImageDefinition{ +// CreateImage creates a new image +func (s *API) CreateImage(volumeID string, name string, bootscript string, arch string) (*Image, error) { + definition := ImageDefinition{ SnapshotIDentifier: volumeID, Name: name, Organization: s.Organization, @@ -144,24 +144,24 @@ func (s *ScalewayAPI) PostImage(volumeID 
string, name string, bootscript string, resp, err := s.PostResponse(s.computeAPI, "images", definition) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) if err != nil { - return "", err + return nil, err } - var image ScalewayOneImage + var image OneImage if err = json.Unmarshal(body, &image); err != nil { - return "", err + return nil, err } - return image.Image.Identifier, nil + return &image.Image, nil } -// GetImages gets the list of images from the ScalewayAPI -func (s *ScalewayAPI) GetImages() (*[]MarketImage, error) { +// GetImages gets the list of images from the API +func (s *API) GetImages() (*[]MarketImage, error) { images, err := s.GetMarketPlaceImages("") if err != nil { return nil, err @@ -187,7 +187,7 @@ func (s *ScalewayAPI) GetImages() (*[]MarketImage, error) { if err != nil { return nil, err } - var OrgaImages ScalewayImages + var OrgaImages Images if err = json.Unmarshal(body, &OrgaImages); err != nil { return nil, err @@ -225,8 +225,8 @@ func (s *ScalewayAPI) GetImages() (*[]MarketImage, error) { return &images.Images, nil } -// GetImage gets an image from the ScalewayAPI -func (s *ScalewayAPI) GetImage(imageID string) (*ScalewayImage, error) { +// GetImage gets an image from the API +func (s *API) GetImage(imageID string) (*Image, error) { resp, err := s.GetResponsePaginate(s.computeAPI, "images/"+imageID, url.Values{}) if err != nil { return nil, err @@ -237,7 +237,7 @@ func (s *ScalewayAPI) GetImage(imageID string) (*ScalewayImage, error) { if err != nil { return nil, err } - var oneImage ScalewayOneImage + var oneImage OneImage if err = json.Unmarshal(body, &oneImage); err != nil { return nil, err @@ -247,7 +247,7 @@ func (s *ScalewayAPI) GetImage(imageID string) (*ScalewayImage, error) { } // DeleteImage deletes a image -func (s *ScalewayAPI) DeleteImage(imageID string) error { +func (s *API) DeleteImage(imageID string) error { resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("images/%s", imageID)) if err != nil { return err @@ -261,7 +261,7 @@ func (s *ScalewayAPI) DeleteImage(imageID string) error { } // GetMarketPlaceImages returns images from marketplace -func (s *ScalewayAPI) GetMarketPlaceImages(uuidImage string) (*MarketImages, error) { +func (s *API) GetMarketPlaceImages(uuidImage string) (*MarketImages, error) { resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%s", uuidImage), url.Values{}) if err != nil { return nil, err @@ -292,7 +292,7 @@ func (s *ScalewayAPI) GetMarketPlaceImages(uuidImage string) (*MarketImages, err } // GetMarketPlaceImageVersions returns image version -func (s *ScalewayAPI) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) (*MarketVersions, error) { +func (s *API) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) (*MarketVersions, error) { resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s", uuidImage, uuidVersion), url.Values{}) if err != nil { return nil, err @@ -322,7 +322,7 @@ func (s *ScalewayAPI) GetMarketPlaceImageVersions(uuidImage, uuidVersion string) } // GetMarketPlaceImageCurrentVersion return the image current version -func (s *ScalewayAPI) GetMarketPlaceImageCurrentVersion(uuidImage string) (*MarketVersion, error) { +func (s *API) GetMarketPlaceImageCurrentVersion(uuidImage string) (*MarketVersion, error) { resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/current", uuidImage), url.Values{}) if err != nil { return 
nil, err @@ -342,7 +342,7 @@ func (s *ScalewayAPI) GetMarketPlaceImageCurrentVersion(uuidImage string) (*Mark } // GetMarketPlaceLocalImages returns images from local region -func (s *ScalewayAPI) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLocalImage string) (*MarketLocalImages, error) { +func (s *API) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLocalImage string) (*MarketLocalImages, error) { resp, err := s.GetResponsePaginate(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%s", uuidImage, uuidVersion, uuidLocalImage), url.Values{}) if err != nil { return nil, err @@ -369,8 +369,8 @@ func (s *ScalewayAPI) GetMarketPlaceLocalImages(uuidImage, uuidVersion, uuidLoca return &ret, nil } -// PostMarketPlaceImage adds new image -func (s *ScalewayAPI) PostMarketPlaceImage(images MarketImage) error { +// CreateMarketPlaceImage adds new image +func (s *API) CreateMarketPlaceImage(images MarketImage) error { resp, err := s.PostResponse(MarketplaceAPI, "images/", images) if err != nil { return err @@ -380,8 +380,8 @@ func (s *ScalewayAPI) PostMarketPlaceImage(images MarketImage) error { return err } -// PostMarketPlaceImageVersion adds new image version -func (s *ScalewayAPI) PostMarketPlaceImageVersion(uuidImage string, version MarketVersion) error { +// CreateMarketPlaceImageVersion adds new image version +func (s *API) CreateMarketPlaceImageVersion(uuidImage string, version MarketVersion) error { resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions", uuidImage), version) if err != nil { return err @@ -391,8 +391,8 @@ func (s *ScalewayAPI) PostMarketPlaceImageVersion(uuidImage string, version Mark return err } -// PostMarketPlaceLocalImage adds new local image -func (s *ScalewayAPI) PostMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { +// CreateMarketPlaceLocalImage adds new local image +func (s *API) CreateMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) if err != nil { return err @@ -402,8 +402,8 @@ func (s *ScalewayAPI) PostMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLoca return err } -// PutMarketPlaceImage updates image -func (s *ScalewayAPI) PutMarketPlaceImage(uudiImage string, images MarketImage) error { +// UpdateMarketPlaceImage updates image +func (s *API) UpdateMarketPlaceImage(uudiImage string, images MarketImage) error { resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudiImage), images) if err != nil { return err @@ -413,8 +413,8 @@ func (s *ScalewayAPI) PutMarketPlaceImage(uudiImage string, images MarketImage) return err } -// PutMarketPlaceImageVersion updates image version -func (s *ScalewayAPI) PutMarketPlaceImageVersion(uuidImage, uuidVersion string, version MarketVersion) error { +// UpdateMarketPlaceImageVersion updates image version +func (s *API) UpdateMarketPlaceImageVersion(uuidImage, uuidVersion string, version MarketVersion) error { resp, err := s.PutResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion), version) if err != nil { return err @@ -424,8 +424,8 @@ func (s *ScalewayAPI) PutMarketPlaceImageVersion(uuidImage, uuidVersion string, return err } -// PutMarketPlaceLocalImage updates local image -func (s *ScalewayAPI) PutMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local 
MarketLocalImage) error { +// UpdateMarketPlaceLocalImage updates local image +func (s *API) UpdateMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string, local MarketLocalImage) error { resp, err := s.PostResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage), local) if err != nil { return err @@ -436,7 +436,7 @@ func (s *ScalewayAPI) PutMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocal } // DeleteMarketPlaceImage deletes image -func (s *ScalewayAPI) DeleteMarketPlaceImage(uudImage string) error { +func (s *API) DeleteMarketPlaceImage(uudImage string) error { resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v", uudImage)) if err != nil { return err @@ -447,7 +447,7 @@ func (s *ScalewayAPI) DeleteMarketPlaceImage(uudImage string) error { } // DeleteMarketPlaceImageVersion delete image version -func (s *ScalewayAPI) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion string) error { +func (s *API) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion string) error { resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%v", uuidImage, uuidVersion)) if err != nil { return err @@ -458,7 +458,7 @@ func (s *ScalewayAPI) DeleteMarketPlaceImageVersion(uuidImage, uuidVersion strin } // DeleteMarketPlaceLocalImage deletes local image -func (s *ScalewayAPI) DeleteMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string) error { +func (s *API) DeleteMarketPlaceLocalImage(uuidImage, uuidVersion, uuidLocalImage string) error { resp, err := s.DeleteResponse(MarketplaceAPI, fmt.Sprintf("images/%v/versions/%s/local_images/%v", uuidImage, uuidVersion, uuidLocalImage)) if err != nil { return err diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/ip.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/ip.go similarity index 59% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/ip.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/ip.go index cd746453ce3f..04d1dd0205a1 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/ip.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/ip.go @@ -7,15 +7,15 @@ import ( "net/url" ) -// ScalewayIPV6Definition represents a Scaleway ipv6 -type ScalewayIPV6Definition struct { +// IPV6 represents a ipv6 +type IPV6 struct { Netmask string `json:"netmask"` Gateway string `json:"gateway"` Address string `json:"address"` } -// ScalewayIPDefinition represents the IP's fields -type ScalewayIPDefinition struct { +// IPV4 represents the IPs fields +type IPV4 struct { Organization string `json:"organization"` Reverse *string `json:"reverse"` ID string `json:"id"` @@ -26,8 +26,8 @@ type ScalewayIPDefinition struct { Address string `json:"address"` } -// ScalewayIPAddress represents a Scaleway IP address -type ScalewayIPAddress struct { +// IPAddress represents a IP address +type IPAddress struct { // Identifier is a unique identifier for the IP address Identifier string `json:"id,omitempty"` @@ -38,18 +38,18 @@ type ScalewayIPAddress struct { Dynamic *bool `json:"dynamic,omitempty"` } -// ScalewayGetIPS represents the response of a GET /ips/ -type ScalewayGetIPS struct { - IPS []ScalewayIPDefinition 
`json:"ips"` +// GetIPS represents the response of a GET /ips/ +type GetIPS struct { + IPS []IPV4 `json:"ips"` } -// ScalewayGetIP represents the response of a GET /ips/{id_ip} -type ScalewayGetIP struct { - IP ScalewayIPDefinition `json:"ip"` +// GetIP represents the response of a GET /ips/{id_ip} +type GetIP struct { + IP IPV4 `json:"ip"` } -// GetIP returns a ScalewayGetIP -func (s *ScalewayAPI) GetIP(ipID string) (*ScalewayGetIP, error) { +// GetIP returns a GetIP +func (s *API) GetIP(ipID string) (*IPV4, error) { resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("ips/%s", ipID), url.Values{}) if err != nil { return nil, err @@ -60,16 +60,59 @@ func (s *ScalewayAPI) GetIP(ipID string) (*ScalewayGetIP, error) { if err != nil { return nil, err } - var ip ScalewayGetIP + var ip GetIP if err = json.Unmarshal(body, &ip); err != nil { return nil, err } - return &ip, nil + return &ip.IP, nil } -// GetIPS returns a ScalewayGetIPS -func (s *ScalewayAPI) GetIPS() (*ScalewayGetIPS, error) { +type UpdateIPRequest struct { + ID string + Reverse string +} + +func (s *API) UpdateIP(req UpdateIPRequest) (*IPV4, error) { + var update struct { + Address string `json:"address"` + ID string `json:"id"` + Reverse *string `json:"reverse"` + Organization string `json:"organization"` + Server *string `json:"server"` + } + + ip, err := s.GetIP(req.ID) + if err != nil { + return nil, err + } + update.Address = ip.Address + update.ID = ip.ID + update.Organization = ip.Organization + update.Server = nil + if ip.Server != nil { + update.Server = &ip.Server.Identifier + } + update.Reverse = nil + if req.Reverse != "" { + update.Reverse = &req.Reverse + } + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", req.ID), update) + if err != nil { + return nil, err + } + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + var data GetIP + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data.IP, nil +} + +// GetIPS returns a GetIPS +func (s *API) GetIPS() ([]IPV4, error) { resp, err := s.GetResponsePaginate(s.computeAPI, "ips", url.Values{}) if err != nil { return nil, err @@ -80,16 +123,16 @@ func (s *ScalewayAPI) GetIPS() (*ScalewayGetIPS, error) { if err != nil { return nil, err } - var ips ScalewayGetIPS + var ips GetIPS if err = json.Unmarshal(body, &ips); err != nil { return nil, err } - return &ips, nil + return ips.IPS, nil } -// NewIP returns a new IP -func (s *ScalewayAPI) NewIP() (*ScalewayGetIP, error) { +// CreateIP returns a new IP +func (s *API) CreateIP() (*IPV4, error) { var orga struct { Organization string `json:"organization"` } @@ -104,16 +147,16 @@ func (s *ScalewayAPI) NewIP() (*ScalewayGetIP, error) { if err != nil { return nil, err } - var ip ScalewayGetIP + var ip GetIP if err = json.Unmarshal(body, &ip); err != nil { return nil, err } - return &ip, nil + return &ip.IP, nil } // AttachIP attachs an IP to a server -func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { +func (s *API) AttachIP(ipID, serverID string) error { var update struct { Address string `json:"address"` ID string `json:"id"` @@ -126,9 +169,9 @@ func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { if err != nil { return err } - update.Address = ip.IP.Address - update.ID = ip.IP.ID - update.Organization = ip.IP.Organization + update.Address = ip.Address + update.ID = ip.ID + update.Organization = ip.Organization update.Server = serverID resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), update) if err != nil { @@ -139,13 
+182,13 @@ func (s *ScalewayAPI) AttachIP(ipID, serverID string) error { } // DetachIP detaches an IP from a server -func (s *ScalewayAPI) DetachIP(ipID string) error { +func (s *API) DetachIP(ipID string) error { ip, err := s.GetIP(ipID) if err != nil { return err } - ip.IP.Server = nil - resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), ip.IP) + ip.Server = nil + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID), ip) if err != nil { return err } @@ -155,7 +198,7 @@ func (s *ScalewayAPI) DetachIP(ipID string) error { } // DeleteIP deletes an IP -func (s *ScalewayAPI) DeleteIP(ipID string) error { +func (s *API) DeleteIP(ipID string) error { resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("ips/%s", ipID)) if err != nil { return err diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/organization.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/organization.go new file mode 100644 index 000000000000..7110edb590e6 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/organization.go @@ -0,0 +1,39 @@ +package api + +import ( + "encoding/json" + "net/http" + "net/url" +) + +// Organization represents a Organization +type Organization struct { + ID string `json:"id"` + Name string `json:"name"` + Users []User `json:"users"` +} + +// organizationsDefinition represents a Organizations +type organizationsDefinition struct { + Organizations []Organization `json:"organizations"` +} + +// GetOrganization returns Organization +func (s *API) GetOrganization() ([]Organization, error) { + resp, err := s.GetResponsePaginate(AccountAPI, "organizations", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data organizationsDefinition + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return data.Organizations, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/permissions.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/permissions.go new file mode 100644 index 000000000000..5e493ef69eee --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/permissions.go @@ -0,0 +1,39 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// Permissions represents the response of GET /permissions +type Permissions map[string]PermCategory + +// PermCategory represents Permissions's fields +type PermCategory map[string][]string + +// permissions represents the permissions +type permissionsResponse struct { + Permissions Permissions `json:"permissions"` +} + +// GetPermissions returns the permissions +func (s *API) GetPermissions() (Permissions, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s/permissions", s.Token), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var permissions permissionsResponse + + if err = json.Unmarshal(body, &permissions); err != nil { + return nil, err + } + return permissions.Permissions, nil +} diff --git 
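// Editor's illustrative sketch (not part of the vendored patch): CreateIP and
// GetIP now return *IPV4 directly instead of the old ScalewayGetIP wrapper, so
// callers no longer go through ip.IP. exampleReserveAndAttachIP is a
// hypothetical helper assuming the same package api context.
func exampleReserveAndAttachIP(s *API, serverID string) (*IPV4, error) {
	ip, err := s.CreateIP() // reserves a new address under s.Organization
	if err != nil {
		return nil, err
	}
	if err := s.AttachIP(ip.ID, serverID); err != nil {
		return nil, err
	}
	// Re-read the address so the returned value carries the Server reference
	// set by the attach call.
	return s.GetIP(ip.ID)
}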
a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/quota.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/quota.go similarity index 51% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/quota.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/quota.go index a83023670e8c..0ee7cff0066b 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/quota.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/quota.go @@ -7,16 +7,16 @@ import ( "net/url" ) -// ScalewayQuota represents a map of quota (name, value) -type ScalewayQuota map[string]int +// Quota represents a map of quota (name, value) +type Quotas map[string]int -// ScalewayGetQuotas represents the response of GET /organizations/{orga_id}/quotas -type ScalewayGetQuotas struct { - Quotas ScalewayQuota `json:"quotas"` +// GetQuotas represents the response of GET /organizations/{orga_id}/quotas +type GetQuotas struct { + Quotas Quotas `json:"quotas"` } -// GetQuotas returns a ScalewayGetQuotas -func (s *ScalewayAPI) GetQuotas() (*ScalewayGetQuotas, error) { +// GetQuotas returns a GetQuotas +func (s *API) GetQuotas() (Quotas, error) { resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("organizations/%s/quotas", s.Organization), url.Values{}) if err != nil { return nil, err @@ -27,10 +27,10 @@ func (s *ScalewayAPI) GetQuotas() (*ScalewayGetQuotas, error) { if err != nil { return nil, err } - var quotas ScalewayGetQuotas + var quotas GetQuotas if err = json.Unmarshal(body, "as); err != nil { return nil, err } - return "as, nil + return quotas.Quotas, nil } diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/security_group.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/security_group.go new file mode 100644 index 000000000000..d0bdb6dc07e4 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/security_group.go @@ -0,0 +1,146 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// SecurityGroup definition +type SecurityGroup struct { + Description string `json:"description"` + ID string `json:"id"` + Organization string `json:"organization"` + Name string `json:"name"` + Servers []ServerRef `json:"servers"` + EnableDefaultSecurity bool `json:"enable_default_security"` + OrganizationDefault bool `json:"organization_default"` +} + +type SecurityGroupRef struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +type ServerRef struct { + Identifier string `json:"id,omitempty"` + Name string `json:"name,omitempty"` +} + +// getSecurityGroups represents the response of a GET /security_groups/ +type getSecurityGroups struct { + SecurityGroups []SecurityGroup `json:"security_groups"` +} + +// getSecurityGroup represents the response of a GET /security_groups/{groupID} +type getSecurityGroup struct { + SecurityGroup SecurityGroup `json:"security_group"` +} + +// NewSecurityGroup definition POST request /security_groups +type NewSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string 
`json:"description"` + EnableDefaultSecurity bool `json:"enable_default_security"` +} + +// UpdateSecurityGroup definition PUT request /security_groups +type UpdateSecurityGroup struct { + Organization string `json:"organization"` + Name string `json:"name"` + Description string `json:"description"` + OrganizationDefault bool `json:"organization_default"` +} + +// DeleteSecurityGroup deletes a SecurityGroup +func (s *API) DeleteSecurityGroup(securityGroupID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID)) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} + +// UpdateSecurityGroup updates a SecurityGroup +func (s *API) UpdateSecurityGroup(group UpdateSecurityGroup, securityGroupID string) (*SecurityGroup, error) { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s", securityGroupID), group) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data getSecurityGroup + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data.SecurityGroup, err +} + +// GetSecurityGroup returns a SecurityGroup +func (s *API) GetSecurityGroup(groupsID string) (*SecurityGroup, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s", groupsID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroups getSecurityGroup + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups.SecurityGroup, nil +} + +// CreateSecurityGroup posts a group on a server +func (s *API) CreateSecurityGroup(group NewSecurityGroup) (*SecurityGroup, error) { + resp, err := s.PostResponse(s.computeAPI, "security_groups", group) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return nil, err + } + var securityGroups getSecurityGroup + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return &securityGroups.SecurityGroup, nil +} + +// GetSecurityGroups returns a SecurityGroups +func (s *API) GetSecurityGroups() ([]SecurityGroup, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "security_groups", url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var securityGroups getSecurityGroups + + if err = json.Unmarshal(body, &securityGroups); err != nil { + return nil, err + } + return securityGroups.SecurityGroups, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go new file mode 100644 index 000000000000..492e3cefe890 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/security_group_rule.go @@ -0,0 +1,140 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// SecurityGroupRule definition +type 
SecurityGroupRule struct { + Direction string `json:"direction"` + Protocol string `json:"protocol"` + IPRange string `json:"ip_range"` + DestPortFrom int `json:"dest_port_from,omitempty"` + Action string `json:"action"` + Position int `json:"position"` + DestPortTo string `json:"dest_port_to"` + Editable bool `json:"editable"` + ID string `json:"id"` +} + +// getSecurityGroupRules represents the response of a GET /_group/{groupID}/rules +type getSecurityGroupRules struct { + Rules []SecurityGroupRule `json:"rules"` +} + +// getSecurityGroupRule represents the response of a GET /_group/{groupID}/rules/{ruleID} +type getSecurityGroupRule struct { + Rules SecurityGroupRule `json:"rule"` +} + +// NewSecurityGroupRule definition POST/PUT request /_group/{groupID} +type NewSecurityGroupRule struct { + Action string `json:"action"` + Direction string `json:"direction"` + IPRange string `json:"ip_range"` + Protocol string `json:"protocol"` + DestPortFrom int `json:"dest_port_from,omitempty"` +} + +// UpdateSecurityGroupRule definition POST/PUT request /_group/{groupID} +type UpdateSecurityGroupRule struct { + Action string `json:"action"` + Direction string `json:"direction"` + IPRange string `json:"ip_range"` + Protocol string `json:"protocol"` + Position int `json:"position"` + DestPortFrom int `json:"dest_port_from,omitempty"` +} + +// GetSecurityGroupRules returns a GroupRules +func (s *API) GetSecurityGroupRules(groupID string) ([]SecurityGroupRule, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", groupID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data getSecurityGroupRules + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return data.Rules, nil +} + +// GetASecurityGroupRule returns a SecurityGroupRule +func (s *API) GetSecurityGroupRule(groupID string, rulesID string) (*SecurityGroupRule, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", groupID, rulesID), url.Values{}) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data getSecurityGroupRule + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data.Rules, nil +} + +type postGroupRuleResponse struct { + SecurityGroupRule SecurityGroupRule `json:"rule"` +} + +// CreateSecurityGroupRule posts a rule on a server +func (s *API) CreateSecurityGroupRule(GroupID string, rules NewSecurityGroupRule) (*SecurityGroupRule, error) { + resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules", GroupID), rules) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + data, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return nil, err + } + var res postGroupRuleResponse + err = json.Unmarshal(data, &res) + return &res.SecurityGroupRule, err +} + +// UpdateSecurityGroupRule updates a SecurityGroupRule +func (s *API) UpdateSecurityGroupRule(rules UpdateSecurityGroupRule, GroupID, RuleID string) (*SecurityGroupRule, error) { + resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", GroupID, RuleID), rules) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, 
resp) + if err != nil { + return nil, err + } + var res postGroupRuleResponse + err = json.Unmarshal(body, &res) + return &res.SecurityGroupRule, err +} + +// DeleteSecurityGroupRule deletes a SecurityGroupRule +func (s *API) DeleteSecurityGroupRule(GroupID, RuleID string) error { + resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("security_groups/%s/rules/%s", GroupID, RuleID)) + if err != nil { + return err + } + defer resp.Body.Close() + + _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp) + return err +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/server.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/server.go similarity index 55% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/server.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/server.go index 53fa6b45264c..6e76dfa85231 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/server.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/server.go @@ -6,12 +6,10 @@ import ( "net/http" "net/url" "time" - - "golang.org/x/sync/errgroup" ) -// ScalewayServer represents a Scaleway server -type ScalewayServer struct { +// Server represents a server +type Server struct { // Arch is the architecture target of the server Arch string `json:"arch,omitempty"` @@ -28,13 +26,13 @@ type ScalewayServer struct { ModificationDate string `json:"modification_date,omitempty"` // Image is the image used by the server - Image ScalewayImage `json:"image,omitempty"` + Image Image `json:"image,omitempty"` // DynamicIPRequired is a flag that defines a server with a dynamic ip address attached DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` // PublicIP is the public IP address bound to the server - PublicAddress ScalewayIPAddress `json:"public_ip,omitempty"` + PublicAddress IPAddress `json:"public_ip,omitempty"` // State is the current status of the server State string `json:"state,omitempty"` @@ -46,7 +44,10 @@ type ScalewayServer struct { PrivateIP string `json:"private_ip,omitempty"` // Bootscript is the unique identifier of the selected bootscript - Bootscript *ScalewayBootscript `json:"bootscript,omitempty"` + Bootscript *Bootscript `json:"bootscript,omitempty"` + + // BootType defines the type of boot. 
Can be local or bootscript + BootType string `json:"boot_type,omitempty"` // Hostname represents the ServerName in a format compatible with unix's hostname Hostname string `json:"hostname,omitempty"` @@ -55,10 +56,10 @@ type ScalewayServer struct { Tags []string `json:"tags,omitempty"` // Volumes are the attached volumes - Volumes map[string]ScalewayVolume `json:"volumes,omitempty"` + Volumes map[string]Volume `json:"volumes,omitempty"` // SecurityGroup is the selected security group object - SecurityGroup ScalewaySecurityGroup `json:"security_group,omitempty"` + SecurityGroup SecurityGroupRef `json:"security_group,omitempty"` // Organization is the owner of the server Organization string `json:"organization,omitempty"` @@ -77,7 +78,7 @@ type ScalewayServer struct { ZoneID string `json:"zone_id,omitempty"` } `json:"location,omitempty"` - IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` + IPV6 *IPV6 `json:"ipv6,omitempty"` EnableIPV6 bool `json:"enable_ipv6,omitempty"` @@ -86,30 +87,30 @@ type ScalewayServer struct { DNSPrivate string `json:"dns_private,omitempty"` } -// ScalewayServerPatchDefinition represents a Scaleway server with nullable fields (for PATCH) -type ScalewayServerPatchDefinition struct { - Arch *string `json:"arch,omitempty"` - Name *string `json:"name,omitempty"` - CreationDate *string `json:"creation_date,omitempty"` - ModificationDate *string `json:"modification_date,omitempty"` - Image *ScalewayImage `json:"image,omitempty"` - DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` - PublicAddress *ScalewayIPAddress `json:"public_ip,omitempty"` - State *string `json:"state,omitempty"` - StateDetail *string `json:"state_detail,omitempty"` - PrivateIP *string `json:"private_ip,omitempty"` - Bootscript *string `json:"bootscript,omitempty"` - Hostname *string `json:"hostname,omitempty"` - Volumes *map[string]ScalewayVolume `json:"volumes,omitempty"` - SecurityGroup *ScalewaySecurityGroup `json:"security_group,omitempty"` - Organization *string `json:"organization,omitempty"` - Tags *[]string `json:"tags,omitempty"` - IPV6 *ScalewayIPV6Definition `json:"ipv6,omitempty"` - EnableIPV6 *bool `json:"enable_ipv6,omitempty"` +// ServerPatchDefinition represents a server with nullable fields (for PATCH) +type ServerPatchDefinition struct { + Arch *string `json:"arch,omitempty"` + Name *string `json:"name,omitempty"` + CreationDate *string `json:"creation_date,omitempty"` + ModificationDate *string `json:"modification_date,omitempty"` + Image *Image `json:"image,omitempty"` + DynamicIPRequired *bool `json:"dynamic_ip_required,omitempty"` + PublicAddress *IPAddress `json:"public_ip,omitempty"` + State *string `json:"state,omitempty"` + StateDetail *string `json:"state_detail,omitempty"` + PrivateIP *string `json:"private_ip,omitempty"` + Bootscript *string `json:"bootscript,omitempty"` + Hostname *string `json:"hostname,omitempty"` + Volumes *map[string]Volume `json:"volumes,omitempty"` + SecurityGroup *SecurityGroupRef `json:"security_group,omitempty"` + Organization *string `json:"organization,omitempty"` + Tags *[]string `json:"tags,omitempty"` + IPV6 *IPV6 `json:"ipv6,omitempty"` + EnableIPV6 *bool `json:"enable_ipv6,omitempty"` } -// ScalewayServerDefinition represents a Scaleway server with image definition -type ScalewayServerDefinition struct { +// ServerDefinition represents a server with image definition +type ServerDefinition struct { // Name is the user-defined name of the server Name string `json:"name"` @@ -134,6 +135,9 @@ type ScalewayServerDefinition 
struct { // CommercialType is the commercial type of the server (i.e: C1, C2[SML], VC1S) CommercialType string `json:"commercial_type"` + // BootType defines the type of boot. Can be local or bootscript + BootType string `json:"boot_type,omitempty"` + PublicIP string `json:"public_ip,omitempty"` EnableIPV6 bool `json:"enable_ipv6,omitempty"` @@ -141,25 +145,25 @@ type ScalewayServerDefinition struct { SecurityGroup string `json:"security_group,omitempty"` } -// ScalewayServers represents a group of Scaleway servers -type ScalewayServers struct { - // Servers holds scaleway servers of the response - Servers []ScalewayServer `json:"servers,omitempty"` +// Servers represents a group of servers +type Servers struct { + // Servers holds servers of the response + Servers []Server `json:"servers,omitempty"` } -// ScalewayServerAction represents an action to perform on a Scaleway server -type ScalewayServerAction struct { +// ServerAction represents an action to perform on a server +type ServerAction struct { // Action is the name of the action to trigger Action string `json:"action,omitempty"` } -// ScalewayOneServer represents the response of a GET /servers/UUID API call -type ScalewayOneServer struct { - Server ScalewayServer `json:"server,omitempty"` +// OneServer represents the response of a GET /servers/UUID API call +type OneServer struct { + Server Server `json:"server,omitempty"` } // PatchServer updates a server -func (s *ScalewayAPI) PatchServer(serverID string, definition ScalewayServerPatchDefinition) error { +func (s *API) PatchServer(serverID string, definition ServerPatchDefinition) error { resp, err := s.PatchResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID), definition) if err != nil { return err @@ -172,67 +176,50 @@ func (s *ScalewayAPI) PatchServer(serverID string, definition ScalewayServerPatc return nil } -// GetServers gets the list of servers from the ScalewayAPI -func (s *ScalewayAPI) GetServers(all bool, limit int) (*[]ScalewayServer, error) { +// GetServers gets the list of servers from the API +func (s *API) GetServers(all bool, limit int) ([]Server, error) { query := url.Values{} if !all { query.Set("state", "running") } + // TODO per_page=20&page=2&state=running if limit > 0 { // FIXME: wait for the API to be ready // query.Set("per_page", strconv.Itoa(limit)) panic("Not implemented yet") } - var ( - g errgroup.Group - apis = []string{ - ComputeAPIPar1, - ComputeAPIAms1, - } - ) - - serverChan := make(chan ScalewayServers, 2) - for _, api := range apis { - g.Go(s.fetchServers(api, query, serverChan)) - } - - if err := g.Wait(); err != nil { + servers, err := s.fetchServers(query) + if err != nil { return nil, err } - close(serverChan) - var servers ScalewayServers - - for server := range serverChan { - servers.Servers = append(servers.Servers, server.Servers...) 
- } for i, server := range servers.Servers { servers.Servers[i].DNSPublic = server.Identifier + URLPublicDNS servers.Servers[i].DNSPrivate = server.Identifier + URLPrivateDNS } - return &servers.Servers, nil + return servers.Servers, nil } -// ScalewaySortServers represents a wrapper to sort by CreationDate the servers -type ScalewaySortServers []ScalewayServer +// SortServers represents a wrapper to sort by CreationDate the servers +type SortServers []Server -func (s ScalewaySortServers) Len() int { +func (s SortServers) Len() int { return len(s) } -func (s ScalewaySortServers) Swap(i, j int) { +func (s SortServers) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s ScalewaySortServers) Less(i, j int) bool { +func (s SortServers) Less(i, j int) bool { date1, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[i].CreationDate) date2, _ := time.Parse("2006-01-02T15:04:05.000000+00:00", s[j].CreationDate) return date2.Before(date1) } -// GetServer gets a server from the ScalewayAPI -func (s *ScalewayAPI) GetServer(serverID string) (*ScalewayServer, error) { +// GetServer gets a server from the API +func (s *API) GetServer(serverID string) (*Server, error) { if serverID == "" { return nil, fmt.Errorf("cannot get server without serverID") } @@ -247,7 +234,7 @@ func (s *ScalewayAPI) GetServer(serverID string) (*ScalewayServer, error) { return nil, err } - var oneServer ScalewayOneServer + var oneServer OneServer if err = json.Unmarshal(body, &oneServer); err != nil { return nil, err @@ -259,44 +246,49 @@ func (s *ScalewayAPI) GetServer(serverID string) (*ScalewayServer, error) { } // PostServerAction posts an action on a server -func (s *ScalewayAPI) PostServerAction(serverID, action string) error { - data := ScalewayServerAction{ +func (s *API) PostServerAction(serverID, action string) (*Task, error) { + data := ServerAction{ Action: action, } resp, err := s.PostResponse(s.computeAPI, fmt.Sprintf("servers/%s/action", serverID), data) if err != nil { - return err + return nil, err } defer resp.Body.Close() - _, err = s.handleHTTPError([]int{http.StatusAccepted}, resp) - return err + body, err := s.handleHTTPError([]int{http.StatusAccepted}, resp) + if err != nil { + return nil, err + } + + var t oneTask + if err = json.Unmarshal(body, &t); err != nil { + return nil, err + } + return &t.Task, err } -func (s *ScalewayAPI) fetchServers(api string, query url.Values, out chan<- ScalewayServers) func() error { - return func() error { - resp, err := s.GetResponsePaginate(api, "servers", query) - if err != nil { - return err - } - defer resp.Body.Close() - - body, err := s.handleHTTPError([]int{http.StatusOK}, resp) - if err != nil { - return err - } - var servers ScalewayServers - - if err = json.Unmarshal(body, &servers); err != nil { - return err - } - out <- servers - return nil +func (s *API) fetchServers(query url.Values) (*Servers, error) { + resp, err := s.GetResponsePaginate(s.computeAPI, "servers", query) + if err != nil { + return nil, err } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var servers Servers + + if err = json.Unmarshal(body, &servers); err != nil { + return nil, err + } + return &servers, nil } // DeleteServer deletes a server -func (s *ScalewayAPI) DeleteServer(serverID string) error { +func (s *API) DeleteServer(serverID string) error { resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("servers/%s", serverID)) if err != nil { return err @@ -309,24 +301,24 @@ func (s *ScalewayAPI) 
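// Editor's illustrative sketch (not part of the vendored patch): PostServerAction
// now returns the *Task created by the action instead of only an error, so the
// caller can poll it with GetTask. examplePowerOn is a hypothetical helper
// assuming the same package api context; "poweron" is a standard server action.
func examplePowerOn(s *API, serverID string) (*Task, error) {
	t, err := s.PostServerAction(serverID, "poweron")
	if err != nil {
		return nil, err
	}
	// t.Status and t.Progress can be refreshed later via s.GetTask(t.Identifier).
	return t, nil
}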
DeleteServer(serverID string) error { return nil } -// PostServer creates a new server -func (s *ScalewayAPI) PostServer(definition ScalewayServerDefinition) (string, error) { +// CreateServer creates a new server +func (s *API) CreateServer(definition ServerDefinition) (*Server, error) { definition.Organization = s.Organization resp, err := s.PostResponse(s.computeAPI, "servers", definition) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) if err != nil { - return "", err + return nil, err } - var server ScalewayOneServer + var data OneServer - if err = json.Unmarshal(body, &server); err != nil { - return "", err + if err = json.Unmarshal(body, &data); err != nil { + return nil, err } - return server.Server.Identifier, nil + return &data.Server, nil } diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/snapshot.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/snapshot.go similarity index 64% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/snapshot.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/snapshot.go index 6148b7d3b10b..99571eb52832 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/snapshot.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/snapshot.go @@ -7,15 +7,15 @@ import ( "net/url" ) -// ScalewaySnapshotDefinition represents a Scaleway snapshot definition -type ScalewaySnapshotDefinition struct { +// SnapshotDefinition represents a snapshot definition +type SnapshotDefinition struct { VolumeIDentifier string `json:"volume_id"` Name string `json:"name,omitempty"` Organization string `json:"organization"` } -// ScalewaySnapshot represents a Scaleway Snapshot -type ScalewaySnapshot struct { +// Snapshot represents a Snapshot +type Snapshot struct { // Identifier is a unique identifier for the snapshot Identifier string `json:"id,omitempty"` @@ -41,47 +41,47 @@ type ScalewaySnapshot struct { VolumeType string `json:"volume_type"` // BaseVolume is the volume from which the snapshot inherits - BaseVolume ScalewayVolume `json:"base_volume,omitempty"` + BaseVolume Volume `json:"base_volume,omitempty"` } -// ScalewayOneSnapshot represents the response of a GET /snapshots/UUID API call -type ScalewayOneSnapshot struct { - Snapshot ScalewaySnapshot `json:"snapshot,omitempty"` +// oneSnapshot represents the response of a GET /snapshots/UUID API call +type oneSnapshot struct { + Snapshot Snapshot `json:"snapshot,omitempty"` } -// ScalewaySnapshots represents a group of Scaleway snapshots -type ScalewaySnapshots struct { - // Snapshots holds scaleway snapshots of the response - Snapshots []ScalewaySnapshot `json:"snapshots,omitempty"` +// Snapshots represents a group of snapshots +type Snapshots struct { + // Snapshots holds snapshots of the response + Snapshots []Snapshot `json:"snapshots,omitempty"` } -// PostSnapshot creates a new snapshot -func (s *ScalewayAPI) PostSnapshot(volumeID string, name string) (string, error) { - definition := ScalewaySnapshotDefinition{ +// CreateSnapshot creates a new snapshot +func (s *API) CreateSnapshot(volumeID string, name string) (*Snapshot, error) { + definition := SnapshotDefinition{ 
VolumeIDentifier: volumeID, Name: name, Organization: s.Organization, } resp, err := s.PostResponse(s.computeAPI, "snapshots", definition) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) if err != nil { - return "", err + return nil, err } - var snapshot ScalewayOneSnapshot + var snapshot oneSnapshot if err = json.Unmarshal(body, &snapshot); err != nil { - return "", err + return nil, err } - return snapshot.Snapshot.Identifier, nil + return &snapshot.Snapshot, nil } // DeleteSnapshot deletes a snapshot -func (s *ScalewayAPI) DeleteSnapshot(snapshotID string) error { +func (s *API) DeleteSnapshot(snapshotID string) error { resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("snapshots/%s", snapshotID)) if err != nil { return err @@ -94,8 +94,8 @@ func (s *ScalewayAPI) DeleteSnapshot(snapshotID string) error { return nil } -// GetSnapshots gets the list of snapshots from the ScalewayAPI -func (s *ScalewayAPI) GetSnapshots() (*[]ScalewaySnapshot, error) { +// GetSnapshots gets the list of snapshots from the API +func (s *API) GetSnapshots() ([]Snapshot, error) { query := url.Values{} resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots", query) @@ -108,16 +108,16 @@ func (s *ScalewayAPI) GetSnapshots() (*[]ScalewaySnapshot, error) { if err != nil { return nil, err } - var snapshots ScalewaySnapshots + var snapshots Snapshots if err = json.Unmarshal(body, &snapshots); err != nil { return nil, err } - return &snapshots.Snapshots, nil + return snapshots.Snapshots, nil } -// GetSnapshot gets a snapshot from the ScalewayAPI -func (s *ScalewayAPI) GetSnapshot(snapshotID string) (*ScalewaySnapshot, error) { +// GetSnapshot gets a snapshot from the API +func (s *API) GetSnapshot(snapshotID string) (*Snapshot, error) { resp, err := s.GetResponsePaginate(s.computeAPI, "snapshots/"+snapshotID, url.Values{}) if err != nil { return nil, err @@ -128,7 +128,7 @@ func (s *ScalewayAPI) GetSnapshot(snapshotID string) (*ScalewaySnapshot, error) if err != nil { return nil, err } - var oneSnapshot ScalewayOneSnapshot + var oneSnapshot oneSnapshot if err = json.Unmarshal(body, &oneSnapshot); err != nil { return nil, err diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/tasks.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/tasks.go new file mode 100644 index 000000000000..70b502a077bb --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/tasks.go @@ -0,0 +1,82 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// Task represents a Task +type Task struct { + // Identifier is a unique identifier for the task + Identifier string `json:"id,omitempty"` + + // StartDate is the start date of the task + StartDate string `json:"started_at,omitempty"` + + // TerminationDate is the termination date of the task + TerminationDate string `json:"terminated_at,omitempty"` + + HrefFrom string `json:"href_from,omitempty"` + + Description string `json:"description,omitempty"` + + Status string `json:"status,omitempty"` + + Progress int `json:"progress,omitempty"` +} + +// oneTask represents the response of a GET /tasks/UUID API call +type oneTask struct { + Task Task `json:"task,omitempty"` +} + +// Tasks represents a group of tasks +type Tasks struct { + // Tasks holds tasks of the response + Tasks []Task 
`json:"tasks,omitempty"` +} + +// GetTasks get the list of tasks from the API +func (s *API) GetTasks() ([]Task, error) { + query := url.Values{} + // TODO per_page=20&page=2 + resp, err := s.GetResponsePaginate(s.computeAPI, "tasks", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var tasks Tasks + + if err = json.Unmarshal(body, &tasks); err != nil { + return nil, err + } + return tasks.Tasks, nil +} + +// GetTask fetches a specific task +func (s *API) GetTask(id string) (*Task, error) { + query := url.Values{} + resp, err := s.GetResponsePaginate(s.computeAPI, fmt.Sprintf("tasks/%s", id), query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + + var t oneTask + if err = json.Unmarshal(body, &t); err != nil { + return nil, err + } + return &t.Task, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/tokens.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/tokens.go new file mode 100644 index 000000000000..b3dff56bb808 --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/tokens.go @@ -0,0 +1,135 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// Token represents a Token +type Token struct { + UserID string `json:"user_id"` + Description string `json:"description,omitempty"` + Roles Role `json:"roles"` + Expires string `json:"expires"` + InheritsUsersPerms bool `json:"inherits_user_perms"` + ID string `json:"id"` +} + +// Role represents a Token UserId Role +type Role struct { + Organization Organization `json:"organization,omitempty"` + Role string `json:"role,omitempty"` +} + +type getTokenResponse struct { + Token Token `json:"token"` +} + +type getTokensResponse struct { + Tokens []Token `json:"tokens"` +} + +func (s *API) GetTokens() ([]Token, error) { + query := url.Values{} + // TODO per_page=20&page=2 + resp, err := s.GetResponsePaginate(s.computeAPI, "tokens", query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var token getTokensResponse + + if err = json.Unmarshal(body, &token); err != nil { + return nil, err + } + return token.Tokens, nil +} + +type CreateTokenRequest struct { + Email string `json:"email"` + Password string `json:"password,omitempty"` + Expires bool `json:"expires"` +} + +func (s *API) CreateToken(req *CreateTokenRequest) (*Token, error) { + resp, err := s.PostResponse(AccountAPI, "tokens", req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) + if err != nil { + return nil, err + } + var data getTokenResponse + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data.Token, nil +} + +type UpdateTokenRequest struct { + Description string `json:"description,omitempty"` + Expires bool `json:"expires"` + ID string `json:"-"` +} + +func (s *API) UpdateToken(req *UpdateTokenRequest) (*Token, error) { + resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("tokens/%s", req.ID), req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err 
:= s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data getTokenResponse + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data.Token, nil +} + +func (s *API) GetToken(id string) (*Token, error) { + query := url.Values{} + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", id), query) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var data getTokenResponse + + if err = json.Unmarshal(body, &data); err != nil { + return nil, err + } + return &data.Token, nil +} + +func (s *API) DeleteToken(id string) error { + resp, err := s.DeleteResponse(AccountAPI, fmt.Sprintf("tokens/%s", id)) + if err != nil { + return err + } + + if _, err = s.handleHTTPError([]int{http.StatusNoContent}, resp); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/user.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/user.go new file mode 100644 index 000000000000..5c07cdbcdfda --- /dev/null +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/user.go @@ -0,0 +1,101 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "net/url" +) + +// User represents a User +type User struct { + Email string `json:"email"` + Firstname string `json:"firstname"` + Fullname string `json:"fullname"` + ID string `json:"id"` + Lastname string `json:"lastname"` + Organizations []Organization `json:"organizations"` + Roles []Role `json:"roles"` + SSHPublicKeys []KeyDefinition `json:"ssh_public_keys"` +} + +// KeyDefinition represents a key +type KeyDefinition struct { + Key string `json:"key"` + Fingerprint string `json:"fingerprint,omitempty"` +} + +// UsersDefinition represents the response of a GET /user +type UsersDefinition struct { + User User `json:"user"` +} + +// UserPatchSSHKeyDefinition represents a User Patch +type UserPatchSSHKeyDefinition struct { + SSHPublicKeys []KeyDefinition `json:"ssh_public_keys"` +} + +// PatchUserSSHKey updates a user +func (s *API) PatchUserSSHKey(UserID string, definition UserPatchSSHKeyDefinition) (*User, error) { + resp, err := s.PatchResponse(AccountAPI, fmt.Sprintf("users/%s", UserID), definition) + if err != nil { + return nil, err + } + + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var user UsersDefinition + + if err = json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user.User, nil +} + +// GetUserID returns the userID +func (s *API) GetUserID() (string, error) { + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("tokens/%s", s.Token), url.Values{}) + if err != nil { + return "", err + } + defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return "", err + } + var token getTokenResponse + + if err = json.Unmarshal(body, &token); err != nil { + return "", err + } + return token.Token.UserID, nil +} + +// GetUser returns the user +func (s *API) GetUser() (*User, error) { + userID, err := s.GetUserID() + if err != nil { + return nil, err + } + resp, err := s.GetResponsePaginate(AccountAPI, fmt.Sprintf("users/%s", userID), url.Values{}) + if err != nil { + return nil, err + } + 
defer resp.Body.Close() + + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var user UsersDefinition + + if err = json.Unmarshal(body, &user); err != nil { + return nil, err + } + return &user.User, nil +} diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/user_data.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/user_data.go similarity index 79% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/user_data.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/user_data.go index c404959babb4..61608b8dbe17 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/user_data.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/user_data.go @@ -10,16 +10,16 @@ import ( "strings" ) -// ScalewayUserdatas represents the response of a GET /user_data -type ScalewayUserdatas struct { +// Userdatas represents the response of a GET /user_data +type Userdatas struct { UserData []string `json:"user_data"` } -// ScalewayUserdata represents []byte -type ScalewayUserdata []byte +// Userdata represents []byte +type Userdata []byte // GetUserdatas gets list of userdata for a server -func (s *ScalewayAPI) GetUserdatas(serverID string, metadata bool) (*ScalewayUserdatas, error) { +func (s *API) GetUserdatas(serverID string, metadata bool) (*Userdatas, error) { var uri, endpoint string endpoint = s.computeAPI @@ -40,7 +40,7 @@ func (s *ScalewayAPI) GetUserdatas(serverID string, metadata bool) (*ScalewayUse if err != nil { return nil, err } - var userdatas ScalewayUserdatas + var userdatas Userdatas if err = json.Unmarshal(body, &userdatas); err != nil { return nil, err @@ -48,12 +48,12 @@ func (s *ScalewayAPI) GetUserdatas(serverID string, metadata bool) (*ScalewayUse return &userdatas, nil } -func (s *ScalewayUserdata) String() string { +func (s *Userdata) String() string { return string(*s) } // GetUserdata gets a specific userdata for a server -func (s *ScalewayAPI) GetUserdata(serverID, key string, metadata bool) (*ScalewayUserdata, error) { +func (s *API) GetUserdata(serverID, key string, metadata bool) (*Userdata, error) { var uri, endpoint string endpoint = s.computeAPI @@ -74,13 +74,13 @@ func (s *ScalewayAPI) GetUserdata(serverID, key string, metadata bool) (*Scalewa if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("no such user_data %q (%d)", key, resp.StatusCode) } - var data ScalewayUserdata + var data Userdata data, err = ioutil.ReadAll(resp.Body) return &data, err } // PatchUserdata sets a user data -func (s *ScalewayAPI) PatchUserdata(serverID, key string, value []byte, metadata bool) error { +func (s *API) PatchUserdata(serverID, key string, value []byte, metadata bool) error { var resource, endpoint string endpoint = s.computeAPI @@ -104,7 +104,7 @@ func (s *ScalewayAPI) PatchUserdata(serverID, key string, value []byte, metadata req.Header.Set("Content-Type", "text/plain") req.Header.Set("User-Agent", s.userAgent) - resp, err := s.client.Do(req) + resp, err := s.Client.Do(req) if err != nil { return err } @@ -118,7 +118,7 @@ func (s *ScalewayAPI) PatchUserdata(serverID, key string, value []byte, metadata } // DeleteUserdata deletes a server user_data -func (s *ScalewayAPI) 
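// Editor's illustrative sketch (not part of the vendored patch): GetUser resolves
// the current token to its user via GetUserID and returns the renamed *User type.
// exampleListSSHKeys is a hypothetical helper assuming the same package api context.
func exampleListSSHKeys(s *API) ([]KeyDefinition, error) {
	u, err := s.GetUser()
	if err != nil {
		return nil, err
	}
	return u.SSHPublicKeys, nil
}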
DeleteUserdata(serverID, key string, metadata bool) error { +func (s *API) DeleteUserdata(serverID, key string, metadata bool) error { var url, endpoint string endpoint = s.computeAPI diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/volume.go b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/volume.go similarity index 72% rename from vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/volume.go rename to vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/volume.go index e7b1adc9fd3f..e4284cce5646 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/api/volume.go +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/github.com/nicolai86/scaleway-sdk/volume.go @@ -7,8 +7,8 @@ import ( "net/url" ) -// ScalewayVolume represents a Scaleway Volume -type ScalewayVolume struct { +// Volume represents a Volume +type Volume struct { // Identifier is a unique identifier for the volume Identifier string `json:"id,omitempty"` @@ -33,7 +33,7 @@ type ScalewayVolume struct { Name string `json:"name,omitempty"` } `json:"server,omitempty"` - // VolumeType is a Scaleway identifier for the kind of volume (default: l_ssd) + // VolumeType is a identifier for the kind of volume (default: l_ssd) VolumeType string `json:"volume_type,omitempty"` // ExportURI represents the url used by initrd/scripts to attach the volume @@ -41,11 +41,11 @@ type ScalewayVolume struct { } type volumeResponse struct { - Volume ScalewayVolume `json:"volume,omitempty"` + Volume Volume `json:"volume,omitempty"` } -// ScalewayVolumeDefinition represents a Scaleway volume definition -type ScalewayVolumeDefinition struct { +// VolumeDefinition represents a volume definition +type VolumeDefinition struct { // Name is the user-defined name of the volume Name string `json:"name"` @@ -59,8 +59,8 @@ type ScalewayVolumeDefinition struct { Organization string `json:"organization"` } -// ScalewayVolumePutDefinition represents a Scaleway volume with nullable fields (for PUT) -type ScalewayVolumePutDefinition struct { +// VolumePutDefinition represents a volume with nullable fields (for PUT) +type VolumePutDefinition struct { Identifier *string `json:"id,omitempty"` Size *uint64 `json:"size,omitempty"` CreationDate *string `json:"creation_date,omitempty"` @@ -75,8 +75,8 @@ type ScalewayVolumePutDefinition struct { ExportURI *string `json:"export_uri,omitempty"` } -// PostVolume creates a new volume -func (s *ScalewayAPI) PostVolume(definition ScalewayVolumeDefinition) (string, error) { +// CreateVolume creates a new volume +func (s *API) CreateVolume(definition VolumeDefinition) (*Volume, error) { definition.Organization = s.Organization if definition.Type == "" { definition.Type = "l_ssd" @@ -84,36 +84,44 @@ func (s *ScalewayAPI) PostVolume(definition ScalewayVolumeDefinition) (string, e resp, err := s.PostResponse(s.computeAPI, "volumes", definition) if err != nil { - return "", err + return nil, err } defer resp.Body.Close() body, err := s.handleHTTPError([]int{http.StatusCreated}, resp) if err != nil { - return "", err + return nil, err } var volume volumeResponse if err = json.Unmarshal(body, &volume); err != nil { - return "", err + return nil, err } - return volume.Volume.Identifier, nil + return &volume.Volume, nil } -// PutVolume updates a volume -func 
(s *ScalewayAPI) PutVolume(volumeID string, definition ScalewayVolumePutDefinition) error { +// UpdateVolume updates a volume +func (s *API) UpdateVolume(volumeID string, definition VolumePutDefinition) (*Volume, error) { resp, err := s.PutResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID), definition) if err != nil { - return err + return nil, err } defer resp.Body.Close() - _, err = s.handleHTTPError([]int{http.StatusOK}, resp) - return err + body, err := s.handleHTTPError([]int{http.StatusOK}, resp) + if err != nil { + return nil, err + } + var volume volumeResponse + + if err = json.Unmarshal(body, &volume); err != nil { + return nil, err + } + return &volume.Volume, nil } // DeleteVolume deletes a volume -func (s *ScalewayAPI) DeleteVolume(volumeID string) error { +func (s *API) DeleteVolume(volumeID string) error { resp, err := s.DeleteResponse(s.computeAPI, fmt.Sprintf("volumes/%s", volumeID)) if err != nil { return err @@ -127,11 +135,11 @@ func (s *ScalewayAPI) DeleteVolume(volumeID string) error { } type volumesResponse struct { - Volumes []ScalewayVolume `json:"volumes,omitempty"` + Volumes []Volume `json:"volumes,omitempty"` } -// GetVolumes gets the list of volumes from the ScalewayAPI -func (s *ScalewayAPI) GetVolumes() (*[]ScalewayVolume, error) { +// GetVolumes gets the list of volumes from the API +func (s *API) GetVolumes() (*[]Volume, error) { query := url.Values{} resp, err := s.GetResponsePaginate(s.computeAPI, "volumes", query) @@ -153,8 +161,8 @@ func (s *ScalewayAPI) GetVolumes() (*[]ScalewayVolume, error) { return &volumes.Volumes, nil } -// GetVolume gets a volume from the ScalewayAPI -func (s *ScalewayAPI) GetVolume(volumeID string) (*ScalewayVolume, error) { +// GetVolume gets a volume from the API +func (s *API) GetVolume(volumeID string) (*Volume, error) { resp, err := s.GetResponsePaginate(s.computeAPI, "volumes/"+volumeID, url.Values{}) if err != nil { return nil, err @@ -165,11 +173,11 @@ func (s *ScalewayAPI) GetVolume(volumeID string) (*ScalewayVolume, error) { if err != nil { return nil, err } - var oneVolume volumeResponse + var volume volumeResponse - if err = json.Unmarshal(body, &oneVolume); err != nil { + if err = json.Unmarshal(body, &volume); err != nil { return nil, err } // FIXME region, arch, owner, title - return &oneVolume.Volume, nil + return &volume.Volume, nil } diff --git a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/vendor.json b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/vendor.json index 4d3bfab6fb76..dfe73626d801 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/vendor.json +++ b/vendor/github.com/hashicorp/go-discover/provider/scaleway/vendor/vendor.json @@ -2,7 +2,7 @@ "comment": "", "ignore": "test", "package": [ - {"path":"github.com/nicolai86/scaleway-sdk/api","checksumSHA1":"ROp7ZtwHdxYKpmwycOxbOOXNnzo=","revision":"33df10cad9fff60467a3dcb992d5340c2b9a8046","revisionTime":"2017-09-17T18:57:50Z"}, + {"path":"github.com/nicolai86/scaleway-sdk","checksumSHA1":"w5i5Tximdwrgf2TDbyVr0KmNO9Q=","revision":"798f60e20bb2466bc3d8f36f1cf4f98410e49b6a","revisionTime":"2018-06-28T01:02:48Z"}, {"path":"golang.org/x/net/context","checksumSHA1":"GtamqiJoL7PGHsN454AoffBFMa8=","revision":"c73622c77280266305273cb545f54516ced95b93","revisionTime":"2017-06-11T01:16:46Z"}, {"path":"golang.org/x/sync/errgroup","checksumSHA1":"S0DP7Pn7sZUmXc55IzZnNvERu6s=","revision":"8e0aa688b654ef28caa72506fa5ec8dba9fc7690","revisionTime":"2017-07-19T03:38:01Z"} ], diff --git 
a/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go b/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go index fc719bbfc9fa..00f0e25fcd0c 100644 --- a/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go +++ b/vendor/github.com/hashicorp/go-discover/provider/vsphere/vsphere_discover.go @@ -45,7 +45,7 @@ func setLog(l *log.Logger) { func discoverErr(format string, a ...interface{}) error { var s string if len(a) > 1 { - s = fmt.Sprintf(format, a) + s = fmt.Sprintf(format, a...) } else { s = format } diff --git a/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..469b44907a09 --- /dev/null +++ b/vendor/github.com/imdario/mergo/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. 
Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/imdario/mergo/LICENSE b/vendor/github.com/imdario/mergo/LICENSE new file mode 100644 index 000000000000..686680298da2 --- /dev/null +++ b/vendor/github.com/imdario/mergo/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2013 Dario Castañé. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/imdario/mergo/README.md b/vendor/github.com/imdario/mergo/README.md new file mode 100644 index 000000000000..8b76f1fbf33a --- /dev/null +++ b/vendor/github.com/imdario/mergo/README.md @@ -0,0 +1,222 @@ +# Mergo + +A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements. + +Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche. + +## Status + +It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc](https://github.com/imdario/mergo#mergo-in-the-wild). 
+ +[![GoDoc][3]][4] +[![GoCard][5]][6] +[![Build Status][1]][2] +[![Coverage Status][7]][8] +[![Sourcegraph][9]][10] + +[1]: https://travis-ci.org/imdario/mergo.png +[2]: https://travis-ci.org/imdario/mergo +[3]: https://godoc.org/github.com/imdario/mergo?status.svg +[4]: https://godoc.org/github.com/imdario/mergo +[5]: https://goreportcard.com/badge/imdario/mergo +[6]: https://goreportcard.com/report/github.com/imdario/mergo +[7]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master +[8]: https://coveralls.io/github/imdario/mergo?branch=master +[9]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg +[10]: https://sourcegraph.com/github.com/imdario/mergo?badge + +### Latest release + +[Release v0.3.6](https://github.com/imdario/mergo/releases/tag/v0.3.6). + +### Important note + +Please keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2) Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). An optional/variadic argument has been added, so it won't break existing code. + +If you were using Mergo **before** April 6th 2015, please check your project works as intended after updating your local copy with ```go get -u github.com/imdario/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause (I hope it won't!) in existing projects after the change (release 0.2.0). + +### Donations + +If Mergo is useful to you, consider buying me a coffee, a beer or making a monthly donation so I can keep building great free software. :heart_eyes: + +Buy Me a Coffee at ko-fi.com +[![Beerpay](https://beerpay.io/imdario/mergo/badge.svg)](https://beerpay.io/imdario/mergo) +[![Beerpay](https://beerpay.io/imdario/mergo/make-wish.svg)](https://beerpay.io/imdario/mergo) +Donate using Liberapay + +### Mergo in the wild + +- [moby/moby](https://github.com/moby/moby) +- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes) +- [vmware/dispatch](https://github.com/vmware/dispatch) +- [Shopify/themekit](https://github.com/Shopify/themekit) +- [imdario/zas](https://github.com/imdario/zas) +- [matcornic/hermes](https://github.com/matcornic/hermes) +- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go) +- [kataras/iris](https://github.com/kataras/iris) +- [michaelsauter/crane](https://github.com/michaelsauter/crane) +- [go-task/task](https://github.com/go-task/task) +- [sensu/uchiwa](https://github.com/sensu/uchiwa) +- [ory/hydra](https://github.com/ory/hydra) +- [sisatech/vcli](https://github.com/sisatech/vcli) +- [dairycart/dairycart](https://github.com/dairycart/dairycart) +- [projectcalico/felix](https://github.com/projectcalico/felix) +- [resin-os/balena](https://github.com/resin-os/balena) +- [go-kivik/kivik](https://github.com/go-kivik/kivik) +- [Telefonica/govice](https://github.com/Telefonica/govice) +- [supergiant/supergiant](supergiant/supergiant) +- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce) +- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy) +- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel) +- [EagerIO/Stout](https://github.com/EagerIO/Stout) +- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api) +- [russross/canvasassignments](https://github.com/russross/canvasassignments) +- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api) +- [casualjim/exeggutor](https://github.com/casualjim/exeggutor) +- [divshot/gitling](https://github.com/divshot/gitling) +- 
[RWJMurphy/gorl](https://github.com/RWJMurphy/gorl) +- [andrerocker/deploy42](https://github.com/andrerocker/deploy42) +- [elwinar/rambler](https://github.com/elwinar/rambler) +- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman) +- [jfbus/impressionist](https://github.com/jfbus/impressionist) +- [Jmeyering/zealot](https://github.com/Jmeyering/zealot) +- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host) +- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go) +- [thoas/picfit](https://github.com/thoas/picfit) +- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server) +- [jnuthong/item_search](https://github.com/jnuthong/item_search) +- [bukalapak/snowboard](https://github.com/bukalapak/snowboard) + +## Installation + + go get github.com/imdario/mergo + + // use in your .go code + import ( + "github.com/imdario/mergo" + ) + +## Usage + +You can only merge same-type structs with exported fields initialized as zero value of their type and same-types maps. Mergo won't merge unexported (private) fields but will do recursively any exported one. It won't merge empty structs value as [they are not considered zero values](https://golang.org/ref/spec#The_zero_value) either. Also maps will be merged recursively except for structs inside maps (because they are not addressable using Go reflection). + +```go +if err := mergo.Merge(&dst, src); err != nil { + // ... +} +``` + +Also, you can merge overwriting values using the transformer `WithOverride`. + +```go +if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil { + // ... +} +``` + +Additionally, you can map a `map[string]interface{}` to a struct (and otherwise, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field. + +```go +if err := mergo.Map(&dst, srcMap); err != nil { + // ... +} +``` + +Warning: if you map a struct to map, it won't do it recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`. They will be just assigned as values. + +More information and examples in [godoc documentation](http://godoc.org/github.com/imdario/mergo). + +### Nice example + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" +) + +type Foo struct { + A string + B int64 +} + +func main() { + src := Foo{ + A: "one", + B: 2, + } + dest := Foo{ + A: "two", + } + mergo.Merge(&dest, src) + fmt.Println(dest) + // Will print + // {two 2} +} +``` + +Note: if test are failing due missing package, please execute: + + go get gopkg.in/yaml.v2 + +### Transformers + +Transformers allow to merge specific types differently than in the default behavior. In other words, now you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have zero value but IsZero can return true because it has fields with zero value. How can we merge a non-zero `time.Time`? + +```go +package main + +import ( + "fmt" + "github.com/imdario/mergo" + "reflect" + "time" +) + +type timeTransfomer struct { +} + +func (t timeTransfomer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error { + if typ == reflect.TypeOf(time.Time{}) { + return func(dst, src reflect.Value) error { + if dst.CanSet() { + isZero := dst.MethodByName("IsZero") + result := isZero.Call([]reflect.Value{}) + if result[0].Bool() { + dst.Set(src) + } + } + return nil + } + } + return nil +} + +type Snapshot struct { + Time time.Time + // ... 
+} + +func main() { + src := Snapshot{time.Now()} + dest := Snapshot{} + mergo.Merge(&dest, src, mergo.WithTransformers(timeTransfomer{})) + fmt.Println(dest) + // Will print + // { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 } +} +``` + + +## Contact me + +If I can help you, you have an idea or you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario) + +## About + +Written by [Dario Castañé](http://dario.im). + +## License + +[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, as [Go language](http://golang.org/LICENSE). diff --git a/vendor/github.com/imdario/mergo/doc.go b/vendor/github.com/imdario/mergo/doc.go new file mode 100644 index 000000000000..6e9aa7baf354 --- /dev/null +++ b/vendor/github.com/imdario/mergo/doc.go @@ -0,0 +1,44 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mergo merges same-type structs and maps by setting default values in zero-value fields. + +Mergo won't merge unexported (private) fields but will do recursively any exported one. It also won't merge structs inside maps (because they are not addressable using Go reflection). + +Usage + +From my own work-in-progress project: + + type networkConfig struct { + Protocol string + Address string + ServerType string `json: "server_type"` + Port uint16 + } + + type FssnConfig struct { + Network networkConfig + } + + var fssnDefault = FssnConfig { + networkConfig { + "tcp", + "127.0.0.1", + "http", + 31560, + }, + } + + // Inside a function [...] + + if err := mergo.Merge(&config, fssnDefault); err != nil { + log.Fatal(err) + } + + // More code [...] + +*/ +package mergo diff --git a/vendor/github.com/imdario/mergo/map.go b/vendor/github.com/imdario/mergo/map.go new file mode 100644 index 000000000000..6ea38e636b64 --- /dev/null +++ b/vendor/github.com/imdario/mergo/map.go @@ -0,0 +1,174 @@ +// Copyright 2014 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" + "unicode" + "unicode/utf8" +) + +func changeInitialCase(s string, mapper func(rune) rune) string { + if s == "" { + return s + } + r, n := utf8.DecodeRuneInString(s) + return string(mapper(r)) + s[n:] +} + +func isExported(field reflect.StructField) bool { + r, _ := utf8.DecodeRuneInString(field.Name) + return r >= 'A' && r <= 'Z' +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
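+ // Record the (address, type) pair in the visited list so recursive values are traversed only once.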
+ visited[h] = &visit{addr, typ, seen} + } + zeroValue := reflect.Value{} + switch dst.Kind() { + case reflect.Map: + dstMap := dst.Interface().(map[string]interface{}) + for i, n := 0, src.NumField(); i < n; i++ { + srcType := src.Type() + field := srcType.Field(i) + if !isExported(field) { + continue + } + fieldName := field.Name + fieldName = changeInitialCase(fieldName, unicode.ToLower) + if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v)) || overwrite) { + dstMap[fieldName] = src.Field(i).Interface() + } + } + case reflect.Ptr: + if dst.IsNil() { + v := reflect.New(dst.Type().Elem()) + dst.Set(v) + } + dst = dst.Elem() + fallthrough + case reflect.Struct: + srcMap := src.Interface().(map[string]interface{}) + for key := range srcMap { + srcValue := srcMap[key] + fieldName := changeInitialCase(key, unicode.ToUpper) + dstElement := dst.FieldByName(fieldName) + if dstElement == zeroValue { + // We discard it because the field doesn't exist. + continue + } + srcElement := reflect.ValueOf(srcValue) + dstKind := dstElement.Kind() + srcKind := srcElement.Kind() + if srcKind == reflect.Ptr && dstKind != reflect.Ptr { + srcElement = srcElement.Elem() + srcKind = reflect.TypeOf(srcElement.Interface()).Kind() + } else if dstKind == reflect.Ptr { + // Can this work? I guess it can't. + if srcKind != reflect.Ptr && srcElement.CanAddr() { + srcPtr := srcElement.Addr() + srcElement = reflect.ValueOf(srcPtr) + srcKind = reflect.Ptr + } + } + + if !srcElement.IsValid() { + continue + } + if srcKind == dstKind { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface { + if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else if srcKind == reflect.Map { + if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil { + return + } + } else { + return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind) + } + } + } + return +} + +// Map sets fields' values in dst from src. +// src can be a map with string keys or a struct. dst must be the opposite: +// if src is a map, dst must be a valid pointer to struct. If src is a struct, +// dst must be map[string]interface{}. +// It won't merge unexported (private) fields and will do recursively +// any exported field. +// If dst is a map, keys will be src fields' names in lower camel case. +// Missing key in src that doesn't match a field in dst will be skipped. This +// doesn't apply if dst is a map. +// This is separated method from Merge because it is cleaner and it keeps sane +// semantics: merging equal types, mapping different (restricted) types. +func Map(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, opts...) +} + +// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by +// non-empty src attribute values. +// Deprecated: Use Map(…) with WithOverride +func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return _map(dst, src, append(opts, WithOverride)...) +} + +func _map(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + // To be friction-less, we redirect equal-type arguments + // to deepMerge. 
Only because arguments can be anything. + if vSrc.Kind() == vDst.Kind() { + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) + } + switch vSrc.Kind() { + case reflect.Struct: + if vDst.Kind() != reflect.Map { + return ErrExpectedMapAsDestination + } + case reflect.Map: + if vDst.Kind() != reflect.Struct { + return ErrExpectedStructAsDestination + } + default: + return ErrNotSupported + } + return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/merge.go b/vendor/github.com/imdario/mergo/merge.go new file mode 100644 index 000000000000..44f70a89d919 --- /dev/null +++ b/vendor/github.com/imdario/mergo/merge.go @@ -0,0 +1,252 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "fmt" + "reflect" +) + +func hasExportedField(dst reflect.Value) (exported bool) { + for i, n := 0, dst.NumField(); i < n; i++ { + field := dst.Type().Field(i) + if field.Anonymous && dst.Field(i).Kind() == reflect.Struct { + exported = exported || hasExportedField(dst.Field(i)) + } else { + exported = exported || len(field.PkgPath) == 0 + } + } + return +} + +type Config struct { + Overwrite bool + AppendSlice bool + Transformers Transformers +} + +type Transformers interface { + Transformer(reflect.Type) func(dst, src reflect.Value) error +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) { + overwrite := config.Overwrite + + if !src.IsValid() { + return + } + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... 
+ visited[h] = &visit{addr, typ, seen} + } + + if config.Transformers != nil && !isEmptyValue(dst) { + if fn := config.Transformers.Transformer(dst.Type()); fn != nil { + err = fn(dst, src) + return + } + } + + switch dst.Kind() { + case reflect.Struct: + if hasExportedField(dst) { + for i, n := 0, dst.NumField(); i < n; i++ { + if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil { + return + } + } + } else { + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + case reflect.Map: + if dst.IsNil() && !src.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + for _, key := range src.MapKeys() { + srcElement := src.MapIndex(key) + if !srcElement.IsValid() { + continue + } + dstElement := dst.MapIndex(key) + switch srcElement.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice: + if srcElement.IsNil() { + continue + } + fallthrough + default: + if !srcElement.CanInterface() { + continue + } + switch reflect.TypeOf(srcElement.Interface()).Kind() { + case reflect.Struct: + fallthrough + case reflect.Ptr: + fallthrough + case reflect.Map: + srcMapElm := srcElement + dstMapElm := dstElement + if srcMapElm.CanInterface() { + srcMapElm = reflect.ValueOf(srcMapElm.Interface()) + if dstMapElm.IsValid() { + dstMapElm = reflect.ValueOf(dstMapElm.Interface()) + } + } + if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil { + return + } + case reflect.Slice: + srcSlice := reflect.ValueOf(srcElement.Interface()) + + var dstSlice reflect.Value + if !dstElement.IsValid() || dstElement.IsNil() { + dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len()) + } else { + dstSlice = reflect.ValueOf(dstElement.Interface()) + } + + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dstSlice = srcSlice + } else if config.AppendSlice { + if srcSlice.Type() != dstSlice.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", srcSlice.Type(), dstSlice.Type()) + } + dstSlice = reflect.AppendSlice(dstSlice, srcSlice) + } + dst.SetMapIndex(key, dstSlice) + } + } + if dstElement.IsValid() && reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map { + continue + } + + if srcElement.IsValid() && (overwrite || (!dstElement.IsValid() || isEmptyValue(dstElement))) { + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + dst.SetMapIndex(key, srcElement) + } + } + case reflect.Slice: + if !dst.CanSet() { + break + } + if !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) && !config.AppendSlice { + dst.Set(src) + } else if config.AppendSlice { + if src.Type() != dst.Type() { + return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type()) + } + dst.Set(reflect.AppendSlice(dst, src)) + } + case reflect.Ptr: + fallthrough + case reflect.Interface: + if src.IsNil() { + break + } + if src.Kind() != reflect.Interface { + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if src.Kind() == reflect.Ptr { + if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + } else if dst.Elem().Type() == src.Type() { + if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil { + return + } + } else { + return ErrDifferentArgumentsTypes + } + break + } + if dst.IsNil() || overwrite { + if dst.CanSet() && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } else if 
err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil { + return + } + default: + if dst.CanSet() && !isEmptyValue(src) && (overwrite || isEmptyValue(dst)) { + dst.Set(src) + } + } + return +} + +// Merge will fill any empty for value type attributes on the dst struct using corresponding +// src attributes if they themselves are not empty. dst and src must be valid same-type structs +// and dst must be a pointer to struct. +// It won't merge unexported (private) fields and will do recursively any exported field. +func Merge(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, opts...) +} + +// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overriden by +// non-empty src attribute values. +// Deprecated: use Merge(…) with WithOverride +func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error { + return merge(dst, src, append(opts, WithOverride)...) +} + +// WithTransformers adds transformers to merge, allowing to customize the merging of some types. +func WithTransformers(transformers Transformers) func(*Config) { + return func(config *Config) { + config.Transformers = transformers + } +} + +// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values. +func WithOverride(config *Config) { + config.Overwrite = true +} + +// WithAppendSlice will make merge append slices instead of overwriting it +func WithAppendSlice(config *Config) { + config.AppendSlice = true +} + +func merge(dst, src interface{}, opts ...func(*Config)) error { + var ( + vDst, vSrc reflect.Value + err error + ) + + config := &Config{} + + for _, opt := range opts { + opt(config) + } + + if vDst, vSrc, err = resolveValues(dst, src); err != nil { + return err + } + if vDst.Type() != vSrc.Type() { + return ErrDifferentArgumentsTypes + } + return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config) +} diff --git a/vendor/github.com/imdario/mergo/mergo.go b/vendor/github.com/imdario/mergo/mergo.go new file mode 100644 index 000000000000..a82fea2fdccc --- /dev/null +++ b/vendor/github.com/imdario/mergo/mergo.go @@ -0,0 +1,97 @@ +// Copyright 2013 Dario Castañé. All rights reserved. +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Based on src/pkg/reflect/deepequal.go from official +// golang's stdlib. + +package mergo + +import ( + "errors" + "reflect" +) + +// Errors reported by Mergo when it finds invalid arguments. +var ( + ErrNilArguments = errors.New("src and dst must not be nil") + ErrDifferentArgumentsTypes = errors.New("src and dst must be of same type") + ErrNotSupported = errors.New("only structs and maps are supported") + ErrExpectedMapAsDestination = errors.New("dst was expected to be a map") + ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct") +) + +// During deepMerge, must keep track of checks that are +// in progress. The comparison algorithm assumes that all +// checks in progress are true when it reencounters them. +// Visited are stored in a map indexed by 17 * a1 + a2; +type visit struct { + ptr uintptr + typ reflect.Type + next *visit +} + +// From src/pkg/encoding/json/encode.go. 
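+// isEmptyValue reports whether v holds the zero or empty value for its kind (false, 0, "", nil, or a zero-length string, slice, map or array).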
+func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + if v.IsNil() { + return true + } + return isEmptyValue(v.Elem()) + case reflect.Func: + return v.IsNil() + case reflect.Invalid: + return true + } + return false +} + +func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) { + if dst == nil || src == nil { + err = ErrNilArguments + return + } + vDst = reflect.ValueOf(dst).Elem() + if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map { + err = ErrNotSupported + return + } + vSrc = reflect.ValueOf(src) + // We check if vSrc is a pointer to dereference it. + if vSrc.Kind() == reflect.Ptr { + vSrc = vSrc.Elem() + } + return +} + +// Traverses recursively both values, assigning src's fields values to dst. +// The map argument tracks comparisons that have already been seen, which allows +// short circuiting on recursive types. +func deeper(dst, src reflect.Value, visited map[uintptr]*visit, depth int) (err error) { + if dst.CanAddr() { + addr := dst.UnsafeAddr() + h := 17 * addr + seen := visited[h] + typ := dst.Type() + for p := seen; p != nil; p = p.next { + if p.ptr == addr && p.typ == typ { + return nil + } + } + // Remember, remember... + visited[h] = &visit{addr, typ, seen} + } + return // TODO refactor +} diff --git a/vendor/github.com/json-iterator/go/Gopkg.lock b/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 000000000000..c8a9fbb3871b --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/json-iterator/go/Gopkg.toml b/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 000000000000..313a0f887b6f --- /dev/null +++ b/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/vendor/github.com/json-iterator/go/LICENSE b/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 000000000000..2cf4f5ab28e9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/json-iterator/go/README.md b/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 000000000000..54d5afe9576d --- /dev/null +++ b/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,91 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + +``` +Go开发者们请加入我们,滴滴出行平台技术部 taowen@didichuxing.com +``` + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --- | --- | --- | --- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! 
+ +Contributors + +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) +* [Oleg Shaldybin](https://github.com/olegshaldybin) +* [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 000000000000..e674d0f397ed --- /dev/null +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. +// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. +// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? 
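+// It returns false once the next token closes the current array or object, after a decoding error, or at end of input.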
+func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. +func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/vendor/github.com/json-iterator/go/any.go b/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 000000000000..daecfed615e7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,321 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
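+// Conversion methods such as ToInt and ToString do not return an error; check LastError to find out whether the underlying value was present and valid.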
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
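+// Objects, arrays and numbers are captured as raw bytes and decoded only when the resulting Any is used.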
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) 
+ } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git a/vendor/github.com/json-iterator/go/any_array.go b/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 000000000000..0449e9aa428a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + 
valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_bool.go b/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 000000000000..9452324af5b1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any *trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/vendor/github.com/json-iterator/go/any_float.go b/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 000000000000..35fdb09497fa --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return 
int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int32.go b/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 000000000000..1b56f399150d --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { + return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_int64.go b/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 000000000000..c440d72b6d3a --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string { + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() 
*Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_invalid.go b/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 000000000000..1d859eac3274 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_nil.go b/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 000000000000..d04cb54c11c1 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/vendor/github.com/json-iterator/go/any_number.go b/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 000000000000..9d1e901a66ad --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + 
return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/vendor/github.com/json-iterator/go/any_object.go b/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 000000000000..c44ef5c989a4 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + 
return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := 
Wrap(field.Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/vendor/github.com/json-iterator/go/any_str.go b/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 000000000000..a4b93c78c822 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func (any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint32.go b/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 000000000000..656bbd33d7ee --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/vendor/github.com/json-iterator/go/any_uint64.go b/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 000000000000..7df2fce33ba9 --- /dev/null +++ b/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git 
a/vendor/github.com/json-iterator/go/build.sh b/vendor/github.com/json-iterator/go/build.sh new file mode 100755 index 000000000000..b45ef688313e --- /dev/null +++ b/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! -d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/vendor/github.com/json-iterator/go/config.go b/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 000000000000..8c58fcba5922 --- /dev/null +++ b/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. +type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg *frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found 
:= cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func 
(encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + 
return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 000000000000..3095662b0610 --- /dev/null +++ b/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
negative => true
zero => false| 23.2 => 23
-32.1 => -32| 12.1 => 12
-12.1 => 0|as normal|same as origin| +| string | empty string => false
string "0" => false
other strings => true | "123.32" => 123
"-123.4" => -123
"123.23xxxw" => 123
"abcde12" => 0
"-32.1" => -32| 13.2 => 13
-1.1 => 0 |12.1 => 12.1
-12.3 => -12.3
12.4xxa => 12.4
+1.1e2 =>110 |same as origin| +| bool | true => true
false => false| true => 1
false => 0 | true => 1
false => 0 |true => 1
false => 0|true => "true"
false => "false"| +| object | true | 0 | 0 |0|originnal json| +| array | empty array => false
nonempty array => true| [] => 0
[1,2] => 1 | [] => 0
[1,2] => 1 |[] => 0
[1,2] => 1|original json| \ No newline at end of file diff --git a/vendor/github.com/json-iterator/go/iter.go b/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 000000000000..95ae54fbfe4d --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,322 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. 
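For orientation, a minimal sketch of how this error model is typically consumed, assuming the package-level ParseString constructor and ConfigDefault defined in this library:

```go
package main

import (
	"fmt"
	"io"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `{"name": "consul", "port": 8500}`)

	// Read decodes the next JSON element into a generic interface{}.
	value := iter.Read()

	// Failures are never returned directly; they accumulate on iter.Error.
	// io.EOF only marks the end of input and is not treated as a failure.
	if iter.Error != nil && iter.Error != io.EOF {
		fmt.Println("decode failed:", iter.Error)
		return
	}
	fmt.Println(value) // map[name:consul port:8500]
}
```

This is the same `Error != nil && Error != io.EOF` pattern the lazy Any types above apply when converting buffered values.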
+type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + return iter +} + +// WhatIsNext gets ValueType of relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
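Reset and ResetBytes exist so one Iterator can be re-pointed at new input, and the BorrowIterator/ReturnIterator pool used throughout the any_*.go files above is the usual way to obtain one; a rough sketch, assuming ConfigDefault:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func decodeBoth(a, b []byte) {
	// Borrow a pooled iterator rather than allocating one per document.
	iter := jsoniter.ConfigDefault.BorrowIterator(a)
	defer jsoniter.ConfigDefault.ReturnIterator(iter)

	fmt.Println(iter.Read()) // first document

	// Point the same instance at the second document instead of allocating again.
	iter.ResetBytes(b)
	fmt.Println(iter.Read()) // second document
}

func main() {
	decodeBoth([]byte(`[1, 2, 3]`), []byte(`{"ok": true}`))
}
```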
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) + iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} diff --git a/vendor/github.com/json-iterator/go/iter_array.go b/vendor/github.com/json-iterator/go/iter_array.go new file mode 100644 index 000000000000..6188cb4577ab --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,58 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. 
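ReadArray below drives a pull-style loop (it reports whether another element follows), while ReadArrayCB hands each element to a callback and stops as soon as the callback returns false; a short sketch of both, assuming ParseString and ConfigDefault:

```go
package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	// Pull style: keep reading while ReadArray reports more elements.
	iter := jsoniter.ParseString(jsoniter.ConfigDefault, `[10, 20, 30]`)
	sum := 0
	for iter.ReadArray() {
		sum += iter.ReadInt()
	}
	fmt.Println(sum) // 60

	// Callback style: returning false stops the scan early.
	iter = jsoniter.ParseString(jsoniter.ConfigDefault, `[1, 2, 3, 4]`)
	seen := 0
	iter.ReadArrayCB(func(it *jsoniter.Iterator) bool {
		it.Skip() // discard the element itself
		seen++
		return seen < 2
	})
	fmt.Println(seen) // 2
}
```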
+func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + return false + } + return true + } + return true + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/vendor/github.com/json-iterator/go/iter_float.go b/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 000000000000..4f883c0959f7 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,347 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber = int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: 
+ if i == iter.tail { + return iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + +func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + value := uint64(0) + c := byte(' ') + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value = uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case 
invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber: + fallthrough + case dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/vendor/github.com/json-iterator/go/iter_int.go b/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 000000000000..2142320355e8 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,345 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 
+func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + 
uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + 
uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/vendor/github.com/json-iterator/go/iter_object.go b/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 000000000000..1c575767130d --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,251 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. +func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object 
field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return false + } + if !callback(iter, field) { + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + return false + } + return true + } + if c == '}' { + return true + } + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 000000000000..f58beb9137bf --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,129 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// 
ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +type captureBuffer struct { + startedAt int + captured []byte +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = make([]byte, 0, 32) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + if len(captured) == 0 { + copied := make([]byte, len(remaining)) + copy(copied, remaining) + return copied + } + captured = append(captured, remaining...) + return captured +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 000000000000..8fcdc3b69bdf --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,144 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + 
case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + case ']': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + case '}': // If close symbol, increase level + level-- + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. +func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/vendor/github.com/json-iterator/go/iter_skip_strict.go b/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 000000000000..f67bc2e83151 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,89 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import "fmt" + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + iter.ReadFloat32() + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', 
'6', '7', '8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/vendor/github.com/json-iterator/go/iter_str.go b/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 000000000000..adc487ea8048 --- /dev/null +++ b/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + 
default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. +func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. + switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/vendor/github.com/json-iterator/go/jsoniter.go b/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 000000000000..c2934f916eb3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. 
+// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. +// This set of interfaces reads input as required and gives +// better performance. +package jsoniter diff --git a/vendor/github.com/json-iterator/go/pool.go b/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 000000000000..e2389b56cfff --- /dev/null +++ b/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect.go b/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 000000000000..4459e203fb85 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,332 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). +// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). 
+type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not read into nil pointer") + return + } + decoder.Decode(ptr, iter) +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := 
typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + stream.WriteNil() + } else 
if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/reflect_array.go b/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 000000000000..13a0b7b0878c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + 
iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_dynamic.go b/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 000000000000..8b6bc8b43328 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_dynamic.go @@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/vendor/github.com/json-iterator/go/reflect_extension.go b/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 000000000000..04f68756bf35 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func (extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder 
+} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := 
typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag { + continue + } + tagParts := strings.Split(tag, ",") + if tag == "-" { + continue + } + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? 
+ var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? + isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_number.go b/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 000000000000..98d45c1ec255 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 
000000000000..f2619936c88f --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_map.go b/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 000000000000..7f66a88b043d --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,326 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + 
reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(textMarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textMarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} 
+ +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder *dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + subStream.buf = make([]byte, 0, 64) + key, elem := mapIter.UnsafeNext() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer() + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer(), + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + 
+type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/vendor/github.com/json-iterator/go/reflect_marshaler.go b/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 000000000000..58ac959ad8bf --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,218 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type 
directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 000000000000..9042eb0cb989 --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,451 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return 
&base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, 
reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec *int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + 
return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + src := *((*[]byte)(ptr)) + if len(src) == 0 { + stream.WriteNil() + return + } + 
encoding := base64.StdEncoding + stream.writeByte('"') + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/vendor/github.com/json-iterator/go/reflect_optional.go b/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 000000000000..43ec71d6dadf --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,133 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return 
false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/vendor/github.com/json-iterator/go/reflect_slice.go b/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 000000000000..9441d79df33b --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder +} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + 
iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 000000000000..355d2d116b4c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1048 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var 
fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 
+ var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 
*structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} 
+} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + msg := "found unknown field: " + field + if decoder.disallowUnknownFields { + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + +func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter 
*Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder *fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && 
iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { 
+ case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found 
`+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 000000000000..d0759cf6418c --- /dev/null +++ b/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,210 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func 
(encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/vendor/github.com/json-iterator/go/stream.go b/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 000000000000..17662fdedcb5 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. +// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. 
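// --- Editor's usage sketch (illustrative only; not part of the vendored upstream
// file). It relies only on identifiers introduced by this vendored change
// (NewStream, ConfigDefault, and the Stream Write*/Flush methods); os.Stdout and
// the literal field/value are assumed purely for the example:
//
//	s := jsoniter.NewStream(jsoniter.ConfigDefault, os.Stdout, 512)
//	s.WriteObjectStart()
//	s.WriteObjectField("name")
//	s.WriteString("consul")
//	s.WriteObjectEnd()
//	if err := s.Flush(); err != nil {
//		// handle the error reported by the underlying io.Writer
//	}
//
//	// With the default (non-indenting) configuration this emits {"name":"consul"}.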
+func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. +func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + n, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[n:] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) 
+} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) + stream.Flush() +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 000000000000..f318d2c59da3 --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,94 @@ +package jsoniter + +import ( + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
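// (Editor's note, not part of the vendored file: as in encoding/json, the format
// verb stays 'f' for magnitudes in [1e-6, 1e21) and switches to 'e' outside that
// range, so for example 0.001 is written as "0.001" while 1e-7 becomes "1e-07".)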
+ if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. + if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/vendor/github.com/json-iterator/go/stream_int.go b/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 000000000000..d1059ee4c20e --- /dev/null +++ b/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = 
writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git a/vendor/github.com/json-iterator/go/stream_str.go b/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 000000000000..54c2ba0b3a2d --- /dev/null +++ 
b/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML